From f9c38f797c092c979c531f6ed75bd60f17529c6b Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Tue, 18 Apr 2017 14:27:51 +0100 Subject: [PATCH 0001/1367] Add wind downscaling algorithm --- lib/improver/constants.py | 34 + ...st_wind_downscaling_roughnesscorrection.py | 749 ++++++++++++++ lib/improver/wind_downscaling.py | 964 ++++++++++++++++++ 3 files changed, 1747 insertions(+) create mode 100644 lib/improver/constants.py create mode 100644 lib/improver/tests/test_wind_downscaling_roughnesscorrection.py create mode 100644 lib/improver/wind_downscaling.py diff --git a/lib/improver/constants.py b/lib/improver/constants.py new file mode 100644 index 0000000000..b02eb31917 --- /dev/null +++ b/lib/improver/constants.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Module to contain generally useful constants.""" + +# Real Missing Data Indicator +RMDI = -32767.0 diff --git a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py new file mode 100644 index 0000000000..ac702b8a22 --- /dev/null +++ b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py @@ -0,0 +1,749 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Unit tests for plugin wind_downscaling.RoughnessCorrection.""" + +import unittest + + +from cf_units import Unit +import iris +from iris.coords import AuxCoord +from iris.tests import IrisTest +import numpy as np + +from improver.grids.osgb import OSGBGRID +from improver.constants import RMDI +from improver.wind_downscaling import RoughnessCorrection + + +def set_up_cube(num_time_points=1, num_grid_points=1, num_height_levels=7, + data=None, name=None, unit=None, height=None): + """Set up a normal OSGB UK National Grid cube.""" + cubel = iris.cube.CubeList() + tunit = Unit("hours since 1970-01-01 00:00:00", "gregorian") + t_0 = 402192.5 + if isinstance(num_grid_points, int): + num_grid_points_x = num_grid_points_y = num_grid_points + else: + num_grid_points_x = num_grid_points[0] + num_grid_points_y = num_grid_points[1] + for i_idx in range(num_height_levels): + cubel1 = iris.cube.CubeList() + for j_idx in range(num_time_points): + cube = OSGBGRID + cube = cube[:num_grid_points_x, :num_grid_points_y] + cube.add_aux_coord(AuxCoord(t_0 + j_idx, "time", units=tunit)) + if height is None: + cube.add_aux_coord(AuxCoord(i_idx, "model_level_number")) + elif isinstance(height, float) or isinstance(height, int): + cube.add_aux_coord( + AuxCoord(height, "height", units=Unit("meter"))) + else: + cube.add_aux_coord( + AuxCoord(height[i_idx], "height", units=Unit("meter"))) + cubel1.append(cube) + cubel.append(cubel1.merge()[0]) + cubel = cubel.merge(0) + cube = cubel[0] + if data is not None: + try: + data = np.array(data) + cube.data = data.reshape(cube.data.shape) + except ValueError as ex: + if ex.message == "total size of new array must be unchanged": + msg = ("supplied data does not fit the cube." + "cube dimensions: {} vs. supplied data {}") + raise ValueError(msg.format(cube.shape, data.shape)) + else: + raise ValueError(ex) + + if name is not None: + try: + cube.standard_name = name + except ValueError as ex: + msg = "error trying to set the supplied name as cube data name: " + raise ValueError(msg + ex.message) + except TypeError as ex: + msg = ("error trying to set the supplied name as cube data name: " + "the name should be string and have a valid variable name ") + raise ValueError(msg + ex.message) + if unit is not None: + try: + cube.units = Unit(unit) + except ValueError as ex: + msg = "error trying to set Units to cube. supplied unit: {}" + raise ValueError(msg.format(unit)) + return cube + + +class TestMultiPoint(object): + + """Test (typically) 3 x 1 or 3x3 point tests. 
+ + The size can be set by nxny, which is either a scalar or an + np.array([x,y]). It constructs cubes for the ancillary fields + Silhouette roughness (AoS), standard deviation of model height grid + cell (Sigma), vegetative roughness (z_0), post-processing grid + orography (pporog) and model orography (modelorog). If no values + are supplied, the grids that are set up have equal values at all + x-y points: AoS = 0.2, Sigma = 20, z_0 = 0.2, pporog = 250, + modelorog = 230. + + """ + + def __init__(self, nx_ny=3, AoS=None, Sigma=None, z_0=0.2, pporog=None, + modelorog=None): + """Set up multi-point tests. + + Parameters + ---------- + nxny: a scalar or an np.array([x,y]) + Sets dimension for tests. + AoS: float or 1D or 2D array + Silhouette roughness field + Sigma: float or 1D or 2D array + Standard deviation field of height in grid cell + z_0: float or 1D or 2D array + Vegetative roughness field + pporog: float or 1D or 2D array + Unsmoothed orography field on post-processing grid + modelorog: float or 1D or 2D array + Model orography field on post-processing grid + + """ + if isinstance(nx_ny, int): + n_x = n_y = nx_ny + else: + n_x = nx_ny[0] + n_y = nx_ny[1] + self.n_x = n_x + self.n_y = n_y + if AoS is None: + AoS = np.ones([n_x, n_y])*0.2 + if Sigma is None: + Sigma = np.ones([n_x, n_y])*20.0 + if pporog is None: + pporog = np.ones([n_x, n_y])*250.0 + if modelorog is None: + modelorog = np.ones([n_x, n_y])*230.0 + self.w_cube = None + self.aos_cube = set_up_cube(1, [n_x, n_y], 1, data=AoS, height=0, + name=None, unit=None) + self.s_cube = set_up_cube(1, [n_x, n_y], 1, data=Sigma, height=0, + name=None, unit="m") + if z_0 is None: + self.z0_cube = None + elif isinstance(z_0, float): + z_0 = np.ones([n_x, n_y])*z_0 + self.z0_cube = set_up_cube(1, [n_x, n_y], 1, data=z_0, height=0, + name=None, unit="m") + elif isinstance(z_0, list): + z_0 = np.array(z_0) + self.z0_cube = set_up_cube(1, [n_x, n_y], 1, data=z_0, height=0, + name=None, unit="m") + self.poro_cube = set_up_cube(1, [n_x, n_y], 1, data=pporog, height=0, + name=None, unit="m") + self.moro_cube = set_up_cube( + 1, [n_x, n_y], 1, data=modelorog, height=0, name=None, unit="m") + + def test_hc_rc(self, wind, dtime=1, height=None, aslist=False): + """Function to set up a wind cube from the supplied np.array. + + Set up the wind and call the RoughnessCorrection class. If the + supplied array is 1D, it is assumed to be the height profile + and the values are copied to all x-y points and all time steps. + If the supplied array is 2D, it is assumed that the supplied + array is a function of height x time. The point is copied to + all x-y points. The first dimension should be the height + dimension. If a 3D array is supplied, the order should be + height x time x x-y-grid. If a height is supplied, it needs to + agree with the first (zeroth) dimension of the supplied wind + array. + + Parameters + ---------- + wind: 2 or 3D array + Multi-level wind target data + dtime: integer, default 1 + Number of time dimension values + height: float, default None + Value for height in metres for zeroth slice of wind + aslist: boolean, default False + Make wind cube into a CubeList of height slices or not. 
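+
+        A minimal usage sketch (values illustrative only, mirroring
+        test_section2a below)::
+
+            uin = np.ones(10) * 20.
+            heights = ((np.arange(10) + 1) ** 2.) * 12.
+            result = TestMultiPoint(3).test_hc_rc(uin, dtime=1,
+                                                  height=heights)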
+ + """ + if aslist: + self.w_cube = iris.cube.CubeList() + for windfield in wind: + windfield = np.array(windfield) + if windfield.ndim == 1: # only function of height + windfield = np.ones( + windfield.shape+(1, self.n_x, self.n_y) + )*windfield.reshape(windfield.shape+(1, 1, 1)) + self.w_cube.append(set_up_cube( + 1, [self.n_x, self.n_y], windfield.shape[0], + data=windfield, name="wind_speed", unit="m s-1", + height=height)) + else: + wind = np.array(wind) + self.w_cube = iris.cube.Cube + if wind.ndim == 1: # only function of height + wind = np.ones( + wind.shape+(dtime, self.n_x, self.n_y) + )*wind.reshape(wind.shape+(1, 1, 1)) + elif wind.ndim == 2: # function of height and time + wind = np.ones( + wind.shape+(self.n_x, self.n_y) + )*wind.reshape(wind.shape+(1, 1)) + self.w_cube = set_up_cube( + dtime, [self.n_x, self.n_y], wind.shape[0], data=wind, + name="wind_speed", unit="m s-1", height=height) + plugin = RoughnessCorrection( + self.aos_cube, self.s_cube, self.poro_cube, + self.moro_cube, 1500., self.z0_cube + ) + return plugin.process(self.w_cube) + + +class TestSinglePoint(object): + """Test a single 1x1 grid. + + A cube is a single 1x x 1y grid, however, the z dimension is not 1. + It constructs 1x1 cubes for the ancillary fields Silhouette + roughness (AoS) and standard deviation of model height grid cell + (Sigma), vegetative roughness (z_0), post-processing grid orography + (pporog) and model orography(modelorog). If no values are supplied, + the values are: AoS = 0.2, Sigma = 20, z_0 = 0.2, pporog = 250, + modelorog = 230. + + The height level grid (heightlevels) can be supplied as an 1D + array. If nothing is supplied, the height level grid is [0.2, 3, + 13, 33, 133, 333, 1133]. + + """ + + def __init__(self, AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250., + modelorog=230., heightlevels=np.array([0.2, 3., 13., 33., + 133., 333., 1133.])): + """Set up the single point test for RoughnessCorrection. + + Parameters + ---------- + + AoS: float + Silhouette roughness field + Sigma: float + Standard deviation field of height in grid cell + z_0: float + Vegetative roughness field + pporog: float + Unsmoothed orography on post-processing grid + modelorog: float + Model orography on post-processing grid + heightlevels: 1D np.array + Height level array + + """ + self.w_cube = None + self.aos_cube = set_up_cube(1, 1, 1, data=AoS, name=None, unit=None) + self.s_cube = set_up_cube(1, 1, 1, data=Sigma, name=None, unit="m") + if z_0 is None: + self.z0_cube = None + else: + self.z0_cube = set_up_cube(1, 1, 1, data=z_0, name=None, unit="m") + self.poro_cube = set_up_cube(1, 1, 1, data=pporog, name=None, + unit="m") + self.moro_cube = set_up_cube(1, 1, 1, data=modelorog, name=None, + unit="m") + if heightlevels is not None: + self.hl_cube = set_up_cube(1, 1, len(heightlevels), + data=heightlevels) + else: + self.hl_cube = None + + def test_hc_rc(self, wind, height=None): + """Test single point height correction and roughness correction. + + Make an iris cube of the supplied wind and set up the height + axis in m. 
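+
+        A 1D wind array is treated as a single height profile, a 2D
+        array as time x height; for example (values illustrative),
+        np.ones((2, 7)) * 20. gives two time steps of a uniform
+        20 m s-1 profile on the seven default height levels.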
+ + Parameters + ---------- + wind: 1 or 2D array + Array of wind speeds + height: float, default None + Value for height in metres for zeroth slice of wind + + """ + wind = np.array(wind) + if wind.ndim == 1: + wind = wind.reshape([1, 1, wind.shape[0]]) + elif wind.ndim == 2: + wind = wind.reshape([wind.shape[0], 1, wind.shape[1]]) + self.w_cube = set_up_cube(wind.shape[0], 1, wind.shape[2], + data=np.rollaxis(wind, 2, start=0), + name="wind_speed", unit="m s-1", + height=height) + plugin = RoughnessCorrection( + self.aos_cube, self.s_cube, self.poro_cube, self.moro_cube, + 1500., self.z0_cube, self.hl_cube + ) + return plugin.process(self.w_cube) + + +class Test1D(IrisTest): + + """Class to test 1 x-y point cubes. + + This class tests the correct behaviour if np.nan or RMDI are + passed, as well as testing the general behaviour of points that + should not have a height corretion (equal height in model and pp + orography) and the correct behaviour of doing roughness correction, + depending on whether or not a vegetative roughness (z_0) cube is + provided. + + Section 0 are tests where RMDI or np.nan values are passed. + Section 1 are sensible single point tests. + + """ + uin = [20., 20., 20., 20., 20., 20., 20.] + hls = [0.2, 3, 13, 33, 133, 333, 1133] + + def test_section0a(self): + """Test AoS is RMDI, point should not do anything, uin = uout.""" + landpointtests_hc_rc = TestSinglePoint( + AoS=RMDI, Sigma=20.0, z_0=0.2, pporog=250., modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section0b(self): + """Test AoS is np.nan, point should not do anything, uin = uout.""" + landpointtests_hc_rc = TestSinglePoint( + AoS=np.nan, Sigma=20.0, z_0=0.2, pporog=250., modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section0c(self): + """Test Sigma is RMDI, point should not do anything, uin = uout.""" + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=RMDI, z_0=0.2, pporog=250., modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section0d(self): + """Test Sigma is np.nan, point should not do anything, uin = uout.""" + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=np.nan, z_0=0.2, pporog=250., modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section0e(self): + """Test z_0 is RMDI, point should not do RC. + + modeloro = pporo, so point should not do HC, uin = uout. + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=RMDI, pporog=230., modelorog=230., + heightlevels=self.hls + ) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section0f(self): + """Test z_0 is np.nan, point should not do RC. + + modeloro = pporo, so point should not do HC, uin = uout. 
+ + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=np.nan, pporog=230., modelorog=230., + heightlevels=self.hls + ) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section0g(self): + """Test z_0 is RMDI, point should not do RC. + + modeloro < pporo, so point should do positive HC, uin < uout. + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=RMDI, pporog=250., modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data > + landpointtests_hc_rc.w_cube.data).all()) + + def test_section0h(self): + """Test pporog is RMDI (QUESTION: or should this fail???) + + RC could be done for this point, HC cannot. + uin >= uout + and since z_0=height[0] + uout[0] = 0 + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=RMDI, modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_hc_rc.w_cube.data).all() and + land_hc_rc.data[0] == 0) + + def test_section0i(self): + """Test pporog is np.nan (QUESTION: or should this fail???) + + RC could be done for this point, HC cannot. + uin >= uout + and since z_0=height[0] + uout[0] = 0 + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=np.nan, modelorog=230., + heightlevels=self.hls + ) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_hc_rc.w_cube.data).all() and + land_hc_rc.data[0] == 0) + + def test_section0j(self): + """Test modelorog is RMDI (QUESTION: or should this fail???). + + RC could be done for this point, HC cannot. + uin >= uout + and since z_0=height[0] + uout[0] = 0 + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250., modelorog=RMDI, + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_hc_rc.w_cube.data).all() and + land_hc_rc.data[0] == 0) + + def test_section0k(self): + """Test fail for RMDI in height grid. + + height grid is RMDI at that location somewhere in z-direction, + should fail with ValueError. + + """ + hls = [0.2, 3, 13, RMDI, 133, 333, 1133] + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230, + heightlevels=hls) + with self.assertRaises(ValueError): + _ = landpointtests_hc_rc.test_hc_rc(self.uin) + + def test_section0l(self): + """Test fail for np.nan in height grid. + + height grid is np.nan at that location somewhere in z-direction, + should fail with ValueError. + + """ + hls = [0.2, 3, 13, np.nan, 133, 333, 1133] + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230, + heightlevels=hls) + with self.assertRaises(ValueError): + _ = landpointtests_hc_rc.test_hc_rc(self.uin) + + def test_section0m(self): + """Test fail for RMDI in uin. + + uin is RMDI at that location somewhere in z-direction, + should fail with ValueError. + + """ + uin = [20., 20., 20., RMDI, RMDI, 20., 0.] + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230, + heightlevels=self.hls + ) + with self.assertRaises(ValueError): + _ = landpointtests_hc_rc.test_hc_rc(uin) + + def test_section0n(self): + """Test fail for np.nan in uin. 
+ + uin is np.nan at that location somewhere in z-direction, + should fail with ValueError. + + """ + uin = [20., 20., 20., np.nan, 20., 20., 20.] + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230, + heightlevels=self.hls + ) + with self.assertRaises(ValueError): + _ = landpointtests_hc_rc.test_hc_rc(uin) + + def test_section1a(self): + """Test HC only, HC = 0. + + z_0 passed as None, hence RC not performed. + modelorg = pporog, hence HC = 0. + uin = uout + + """ + landpointtests_hc = TestSinglePoint( + z_0=None, pporog=250., modelorog=250.) + land_hc_rc = landpointtests_hc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc.w_cube, land_hc_rc) + + def test_section1b(self): + """Test HC only. + + z_0 passed as None, hence RC not performed. + modelorg < pporog, hence positive HC. + uin <= uout, at least one height has uin < uout. + + """ + landpointtests_hc = TestSinglePoint( + z_0=None, pporog=250., modelorog=230.) + land_hc_rc = landpointtests_hc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data >= + landpointtests_hc.w_cube.data).all() and + (land_hc_rc.data > + landpointtests_hc.w_cube.data).any()) + + def test_section1c(self): + """Test RC and HC, HC=0. + + z_0 passed, hence RC performed. + modelorg == pporog, hence no HC. + uin >= uout, at least one height has uin > uout, uout[0] = 0. + + """ + landpointtests_rc = TestSinglePoint( + z_0=0.2, pporog=250., modelorog=250.) + land_hc_rc = landpointtests_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_rc.w_cube.data).all() and + (land_hc_rc.data < + landpointtests_rc.w_cube.data).any() and + land_hc_rc.data[0] == 0) + + def test_section1d(self): + """Test RC and HC. + + z_0 passed, hence RC performed. + modelorg >> pporog, hence negative HC. + uin >= uout, at least one height has uin > uout + z_0 = height[0] hence RC[0] results in 0. + uout[0] RC is 0. HC is negative, negative speeds not allowed. + Must be 0. + + """ + landpointtests_hc_rc = TestSinglePoint( + z_0=0.2, pporog=230., modelorog=250.) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_hc_rc.w_cube.data).all() and + (land_hc_rc.data < + landpointtests_hc_rc.w_cube.data).any() and + (land_hc_rc.data >= 0).all() and + land_hc_rc.data[0] == 0) + + def test_section1e(self): + """Test RC and HC, but sea point masked out (AoS). + + Sea point according to (AoS=0) => masked out. + z_0 passed, hence RC performed in theory. + uin = uout. + + """ + landpointtests_hc_rc = TestSinglePoint( + z_0=0.2, AoS=0.) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section1f(self): + """Test RC and HC, but sea point masked out (Sigma). + + Sea point according to (Sigma=0) => masked out + z_0 passed, hence RC performed in theory. + uin = uout. + + """ + landpointtests_hc_rc = TestSinglePoint(z_0=0.2, Sigma=0.) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + +class Test2D(IrisTest): + + """Test multi-point wind corrections. + + Section 2 are multiple point, multiple time tests + Section 3 are tests that should fail because the grids are not all + the same or units are wrong. + + """ + uin = [20., 20., 20., 20., 20., 20., 20.] + hls = [0.2, 3, 13, 33, 133, 333, 1133] + + def test_section2a(self): + """Test multiple points. 
+ + All points should have equal u profile hence all points in a + slice over height should be equal. + + """ + hlvs = 10 + uin = np.ones(hlvs)*20 + heights = ((np.arange(hlvs)+1)**2.)*12. + multip_hc_rc = TestMultiPoint(3) + land_hc_rc = multip_hc_rc.test_hc_rc(uin, dtime=1, height=heights) + hidx = land_hc_rc.shape.index(hlvs) + for field in land_hc_rc.slices_over(hidx): + self.failUnless((field.data == field.data[0, 0]).all()) + + def test_section2b(self): + """Test a mix of sea and land points over multiple timesteps. + + p1: sea point, no correction + p2: land point, equal height, RC (<=uin), no HC + p3: land point, model=p2 everywhere + Two time steps tested, should be equal. + + """ + uin = np.ones(10)*20 + heights = ((np.arange(10)+1)**2.)*12 + multip_hc_rc = TestMultiPoint( + nx_ny=[3, 1], AoS=[0, 0.2, 0.2], pporog=[0, 250, 250], + modelorog=[0, 250, 230]) + land_hc_rc = multip_hc_rc.test_hc_rc(uin, dtime=2, height=heights) + tidx = land_hc_rc.shape.index(2) + time1 = land_hc_rc.data.take(0, axis=tidx) + time2 = land_hc_rc.data.take(1, axis=tidx) + # Check on time. + self.assertArrayEqual(time1, time2) + xidxnew = land_hc_rc.shape.index(3) + xidxold = multip_hc_rc.w_cube.data.shape.index(3) + landp1new = land_hc_rc.data.take(0, axis=xidxnew) + landp1old = multip_hc_rc.w_cube.data.take(0, axis=xidxold) + # Check on p1. + self.assertArrayEqual(landp1new, landp1old) + landp2new = land_hc_rc.data.take(1, axis=xidxnew) + landp2old = multip_hc_rc.w_cube.data.take(1, axis=xidxold) + # Check on p2. + self.failUnless((landp2new <= landp2old).all() and + (landp2new < landp2old).any()) + landp3new = land_hc_rc.data.take(2, axis=xidxnew) + # Check on p3. + self.failUnless((landp2new <= landp3new).all() and + (landp2new < landp3new).any()) + + def test_section2c(self): + """As test 2b, but passing the two time steps in a list. + + timesteps are a list rather than a 4D cube. This should raise + an error. + + """ + uin = np.ones(10)*20 + heights = ((np.arange(10)+1)**2.)*12 + multip_hc_rc = TestMultiPoint( + nx_ny=[3, 1], AoS=[0, 0.2, 0.2], pporog=[0, 250, 250], + modelorog=[0, 250, 230] + ) + msg = "wind input is not a cube, but " + with self.assertRaisesRegexp(ValueError, msg): + _ = multip_hc_rc.test_hc_rc([uin, uin], dtime=2, height=heights, + aslist=True) + + def test_section3a(self): + """As test 1c, however with manipulated z_0 cube. + + All ancillary fields have 1x1 dim, z_0 is on a different grid. + This should fail with ValueError("ancillary grids are not + consistent"). + + """ + landpointtests_rc = TestSinglePoint( + z_0=0.2, pporog=250., modelorog=250.) + landpointtests_rc.z0_cube = set_up_cube( + 1, [1, 2], 1, data=np.array([landpointtests_rc.z0_cube.data, + landpointtests_rc.z0_cube.data]), + height=0, name=None, unit="m") + msg = "ancillary grids are not consistent" + with self.assertRaisesRegexp(ValueError, msg): + _ = landpointtests_rc.test_hc_rc(self.uin) + + def test_section3b(self): + """As test 3a, however with manipulated modelorog cube instead. + + This should fail with ValueError("ancillary grids are not + consistent"). + + """ + landpointtests_rc = TestSinglePoint( + z_0=0.2, pporog=250., modelorog=250.) 
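+        # Rebuild moro_cube on a 1x2 grid while the other ancillaries
+        # stay 1x1, so the grids no longer match and process() is
+        # expected to raise the ValueError asserted below.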
+ landpointtests_rc.moro_cube = set_up_cube( + 1, [1, 2], 1, data=np.array([landpointtests_rc.moro_cube.data, + landpointtests_rc.moro_cube.data]), + height=0, name=None, unit="m") + msg = "ancillary grids are not consistent" + with self.assertRaisesRegexp(ValueError, msg): + _ = landpointtests_rc.test_hc_rc(self.uin) + + def test_section3c(self): + """As test 3a, however with manipulated z_0 units. + + This should fail with a wrong units error. + + """ + landpointtests_rc = TestSinglePoint( + z_0=0.2, pporog=250., modelorog=250.) + landpointtests_rc.z0_cube.units = Unit('s') + msg = ("z0 ancil has unexpected unit: should be {} " + "is {}") + with self.assertRaisesRegexp( + ValueError, msg.format(Unit('m'), + landpointtests_rc.z0_cube.units)): + _ = landpointtests_rc.test_hc_rc(self.uin) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py new file mode 100644 index 0000000000..53de10fc15 --- /dev/null +++ b/lib/improver/wind_downscaling.py @@ -0,0 +1,964 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Module containing wind downscaling plugins.""" + +import copy +import itertools + +from cf_units import Unit +import iris +import numpy as np + +from improver.constants import RMDI + +# Scale parameter to determine reference height +ABSOLUTE_CORRECTION_TOL = 0.04 + +# Scaling parameter to determine reference height +HREF_SCALE = 2.0 + +# Von Karman's constant +VONKARMAN = 0.4 + +# Default roughness length for sea points +Z0M_SEA = 0.0001 + + +class FrictionVelocity(object): + + """"Class to calculate the friction velocity. + + This holds the function to calculate the friction velocity u_star, + given a reference height h_ref, the velocity at the reference + height u_href and the surface roughness z_0. + + """ + + def __init__(self, u_href, h_ref, z_0, mask): + """Initialise the class. 
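+
+        The friction velocity computed in calc_ustar follows the
+        neutral logarithmic wind profile:
+        u_star = K * u_href / ln(h_ref / z_0), with K Von Karman's
+        constant (0.4). For example (illustrative values only):
+        u_href = 10 m s-1, h_ref = 20 m and z_0 = 0.2 m give
+        u_star = 0.4 * 10 / ln(100), i.e. roughly 0.87 m s-1.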
+ + Parameters: + ----------- + u_href: 2D np.array (float) + wind speed at h_ref + h_ref: 2D np.array (float) + reference height + z_0: 2D np.array (float) + vegetative roughness length + mask: 2D np.array (logical) + where True, calculate u* + + comments: + * z_0 and h_ref need to have identical units. + * the calculated friction velocity will have the units of the + supplied velocity u_href. + + """ + self.u_href = u_href + self.h_ref = h_ref + self.z_0 = z_0 + self.mask = mask + + def calc_ustar(self): + """Function to calculate the friction velocity. + + Returns: + -------- + ustar: 2D array (float) + friction velocity + + """ + ustar = np.ones(self.u_href.shape) * RMDI + ustar[self.mask] = VONKARMAN * (self.u_href[self.mask]/np.log + (self.h_ref[self.mask] / + self.z_0[self.mask])) + return ustar + + +class RoughnessCorrectionUtilities(object): + + """Class to calculate the height and roughness wind corrections. + + This holds functions to calculate the roughness and height + corrections given the ancil files: + * standard deviation of hight in grid cell as sigma (model grid on pp grid) + * Silhouette roughness as a_over_s (model grid on pp grid) + * vegetative roughness length z_0 (model grid on pp grid) + * post-processing grid orography pporo + * model grid orography interpolated on post-processing grid modoro + * height level 3D/ 1D grid + and + * windspeed 3D field on height level 3D grid (from above). + + """ + + def __init__(self, a_over_s, sigma, z_0, pporo, modoro, ppres, modres): + """Set up roughness and height correction. + + This sets up the parameters used for roughness and height + correction given the ancillary file inputs: + + Parameters: + ---------- + a_over_s: 2D array (float) + Silhouette roughness field, dimensionless ancillary data, + calculated according to Robinson (2008) + sigma: 2D array (float) + Standard deviation field of height in the grid cell, units + of length + z_0: 2D array (float) + Vegetative roughness height field, units of length + pporo: 2D array (float) + Post processing grid orography field + modoro: 2D array (float) + Model orography field interpolated to post processing grid + ppres: scalar (float) + Grid cell length of post processing grid + modres: scalar (float) + Grid cell length of model grid + + """ + self.a_over_s = a_over_s + self.z_0 = z_0 + if z_0 is None: + self.l_no_winddownscale = True + else: + self.l_no_winddownscale = False + self.pporo = pporo + self.modoro = modoro + self.h_over_2 = self.sigma2hover2(sigma) # half peak to trough height + self.hcmask, self.rcmask = self.setmask() # HC mask, RC mask + if not self.l_no_winddownscale: + self.z_0[z_0 <= 0] = Z0M_SEA + self.dx_min = ppres/2. # scales smaller than this not resolved in pp + # the original code had hardcoded 500 + self.dx_max = 3.*modres # scales larger than this resolved in model + # the original code had hardcoded 4000 + self.wavenum = self.calc_wav() # k = 2*pi / L + self.h_ref = self.calc_h_ref() + self.refinemask() # HC mask needs to be updated for missing orography + self.h_at0 = self.delta_height() # pp orography - model orography + + def refinemask(self): + """Remask over RMDI and NaN orography. + + The mask for HC needs to be False where either of the + orographies (model or pp) has an invalid number. This cannot be + done before because the mask is used to calculate the + wavenumber which can and should be calculated for all points + where h_over_2 and a_over_s is a valid number. 
+ + """ + self.hcmask[self.pporo == RMDI] = False + self.hcmask[self.modoro == RMDI] = False + self.hcmask[np.isnan(self.pporo)] = False + self.hcmask[np.isnan(self.modoro)] = False + + def setmask(self): + """Create a ~land-sea mask. + + Create a mask that is basically a land-sea mask: + Both, the standard deviation and the silouette roughness, are 0 + over the sea. A standard deviation of 0 results in a RMDI for + h_over_2. + + Returns: + ------- + hcmask: 2D array (logical) + True for land-points, false for Sea (HC) + rcmask: 2D array (logical) + additionally False for invalid z_0 (RC) + + """ + hcmask = np.full(self.h_over_2.shape, True, dtype=bool) + hcmask[self.h_over_2 <= 0] = False + hcmask[self.a_over_s <= 0] = False + hcmask[np.isnan(self.h_over_2)] = False + hcmask[np.isnan(self.a_over_s)] = False + rcmask = np.copy(hcmask) + if not self.l_no_winddownscale: + rcmask[self.z_0 <= 0] = False + rcmask[np.isnan(self.z_0)] = False + return hcmask, rcmask + + @staticmethod + def sigma2hover2(sigma): + """Calculate the half peak-to-trough height. + + The ancillary data used to estimate the peak to trough height + contains the standard deviation of height in a cell. For + sine-waves, this relates to the amplitude of the wave as: + + Amplitude = sigma * sqrt(2) + + The amplitude would correspond to half the peak-to-trough + height (h_o_2). + + Parameters: + ----------- + sigma: 2D array + standard deviation of height in grid cell. + + Returns: + -------- + h_o_2: 2D array + of half peak-to-trough height. + + Comments: + Points that had sigma = 0 (i.e. sea points) are set to + RMDI. + + """ + h_o_2 = np.ones(sigma.shape) * RMDI + h_o_2[sigma > 0] = sigma[sigma > 0] * np.sqrt(2.0) + return h_o_2 + + def calc_wav(self): + """Calculate wavenumber k of typical orographic lengthscale. + + Function to calculate wavenumber k of typical orographic + lengthscale L: + k = 2*pi / L (1) + + L is approximated from half the peak-to-trough height h_over_2 + and the silhoutte roughness a_over_s (average of up-slopes per + unit length over several cross-sections through a grid cell) + as: + L = 2*h_over_2 / a_over_s (2) + + a_over_s is dimensionless since it is the sum of up-slopes + measured in the same unit lengths as it is calculated over. + + h_over_2 is calculated from the standard deviation of height in + a grid cell, sigma, as: + h_over_2 = sqrt(2) * sigma + + which is based on the assumptions of sine waves, see + sigma2hover2. + + From eq. (1) and (2) it follows that: + k = 2*pi / (2*h_over_2 / a_over_s) + = a_over_s * pi / h_over_2 + + Returns: + -------- + wavn: 2D np.array + wavenumber in units of inverse units of supplied h_over_2. + + """ + wavn = np.ones(self.a_over_s.shape) * RMDI + wavn[self.hcmask] = (self.a_over_s[self.hcmask] / + self.h_over_2[self.hcmask]*np.pi) + wavn[wavn > np.pi/self.dx_min] = np.pi/self.dx_min + wavn[self.h_over_2 == 0] = RMDI + wavn[abs(wavn) < np.pi/self.dx_max] = np.pi/self.dx_max + return wavn + + def calc_h_ref(self): + """Calculate the reference height for roughness correction. + + The reference height below which the flow is in equilibrium + with the vegetative roughness is proportional to 1/wavenum + (Howard & Clark, 2007). + + Vosper (2009) and Clark (2009) argue that at the reference + height, the perturbation should have decayed to a fraction + epsilon (ABSOLUTE_CORRECTION_TOL). The factor alpha + implements eq. 1.3 in Clark (2009): UK Climatology - Wind + Screening Tool. See also Vosper (2009) for a motivation. 
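+
+        In the notation of the code below this amounts, before the
+        clipping and bounding steps, to
+        h_ref = (alpha + ln(wavenum * h_over_2)) / wavenum.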
+ + alpha is the log of scale parameter to determine reference + height which is currently set to 0.04 (this corresponds to + epsilon in both Vosper and Clark) + + Returns: + -------- + h_ref: 2D np.array (float) + reference height for roughness correction + + """ + alpha = -np.log(ABSOLUTE_CORRECTION_TOL) + aparam = np.ones(self.wavenum.shape) * RMDI + h_ref = np.ones(self.wavenum.shape) * RMDI + aparam[self.hcmask] = alpha + np.log(self.wavenum[self.hcmask] * + self.h_over_2[self.hcmask]) + aparam[aparam > 1.0] = 1.0 + aparam[aparam < 0.0] = 0.0 + h_ref[self.hcmask] = aparam[self.hcmask] / self.wavenum[self.hcmask] + h_ref[h_ref < 1.0] = 1.0 + h_ref = np.minimum(h_ref, HREF_SCALE*self.h_over_2) + h_ref[h_ref < 1.0] = 1.0 + h_ref[~self.hcmask] = 0.0 + return h_ref + + def roughness_correction_sub(self, hgrid, uold, mask): + """Function to perform the roughness correction. + + Parameters: + ---------- + hgrid: 3D or 1D np.array (float) + height above orography + uold: 3D np.array (float) + original velocities at hgrid. + + Returns: + -------- + unew: 3D np.array (float) + Corrected wind speed on hgrid. Above href, this is + equal to uold. + + Comments: + Replace the windspeed profile below the reference height with one + that increases logarithmic with height, bound by the original + velocity uhref at the reference height h_ref and by a 0 velocity at + the vegetative roughness height z_0 + + """ + uhref = self.calc_u_at_h(uold, hgrid, self.h_ref, mask) + if hgrid.ndim == 1: + hgrid = hgrid.reshape((1, 1, )+(hgrid.shape[0],)) + ustar = FrictionVelocity(uhref, self.h_ref, self.z_0, + mask).calc_ustar() + unew = np.copy(uold) + mhref = self.h_ref + mhref[~mask] = RMDI + cond = hgrid < (self.h_ref).reshape(self.h_ref.shape+(1,)) + unew[cond] = ( + ustar.reshape(ustar.shape+(1,))*np.ones(unew.shape) + )[cond]*(np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) + * np.ones(unew.shape)))[cond])/VONKARMAN + return unew + + def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): + """Function to interpolate u_in on h_in at hhere. + + Parameters: + ---------- + u_in: 3D array (float) + velocity on h_in layer, last dim is height + h_in: 3D or 1D array (float) + height layer array + hhere: 2D array (float) + height grid to interpolate at + (dolog: scalar (logial) + if True, log interpolation, default False) + + Returns: + ------- + uath: 2D array (float) + velocity interpolated at h + + """ + u_in = np.ma.masked_less(u_in, 0.0) + h_in = np.ma.masked_less(h_in, 0.0) + # h_in.mask = u_in.mask + # If I allow 1D height grids, I think I cannot do the hop over. 
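+        # Negative (RMDI) entries in u_in and h_in were masked above,
+        # so the argmax/argmin searches below select the nearest valid
+        # levels bracketing hhere.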
+ + # Ignores the height at the position where u_in is RMDI,"hops over" + hhere = np.ma.masked_less(hhere, 0.0) + upidx = np.argmax(h_in > hhere.reshape(hhere.shape+(1,)), axis=2) + # loidx = np.maximum(upidx-1, 0) #if RMDI, need below + loidx = np.argmin(np.ma.masked_less(hhere.reshape(hhere.shape+(1,)) - + h_in, 0.0), axis=2) + + if h_in.ndim == 3: + hup = h_in.take(upidx.flatten()+np.arange(0, upidx.size + * h_in.shape[2], + h_in.shape[2])) + hlow = h_in.take(loidx.flatten()+np.arange(0, loidx.size + * h_in.shape[2], + h_in.shape[2])) + elif h_in.ndim == 1: + hup = h_in[upidx].flatten() + hlow = h_in[loidx].flatten() + uup = u_in.take(upidx.flatten()+np.arange(0, upidx.size*u_in.shape[2], + u_in.shape[2])) + ulow = u_in.take(loidx.flatten()+np.arange(0, loidx.size*u_in.shape[2], + u_in.shape[2])) + mask = mask.flatten() + uath = np.full(mask.shape, RMDI, dtype=float) + if dolog: + uath[mask] = self.loginterpol(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) + else: + uath[mask] = self.interp1d(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) + uath = np.reshape(uath, hhere.shape) + return uath + + @staticmethod + def interp1d(xup, xlow, at_x, yup, ylow): + """Simple 1D linear interpolation for 2D grid inputs level. + + Parameters: + ---------- + xlow: 2D np.array (float) + lower x-bins + xup: 2D np.array (float) + upper x-bins + at_x: 2D np.array (float) + x values to interpolate y at + yup: 2D np.array(float) + y(xup) + ylow: 2D np.array (float) + y(xlow) + + Returns: + ------- + interp: 2D np.array (float) + y(at_x) assuming a lin function between xlow and xup + + """ + interp = np.full(xup.shape, RMDI, dtype=float) + diffs = (xup - xlow) + interp[diffs != 0] = ( + ylow[diffs != 0]+((at_x[diffs != 0]-xlow[diffs != 0]) / + diffs[diffs != 0]*(yup[diffs != 0] + - ylow[diffs != 0]))) + interp[diffs == 0] = at_x[diffs == 0]/xup[diffs == 0]*(yup[diffs == 0]) + return interp + + @staticmethod + def loginterpol(x_u, x_l, at_x, y_u, y_l): + """Simple 1D log interpolation y(x), except if lowest layer is + ground level. + + Parameters: + ---------- + x_l: 2D np.array (float) + lower x-bins + x_u: 2D np.array (float) + upper x-bins + at_x: 2D np.array (float) + x values to interpolate y at + y_u: 2D np.array (float) + y(x_u) + y_l: 2D np.array (float) + y(x_l) + + Returns: + ------- + loginterp: 2D np.array (float) + y(at_x) assuming a log function between x_l and x_u + + """ + ain = np.full(x_u.shape, RMDI, dtype=float) + loginterp = np.full(x_u.shape, RMDI, dtype=float) + mfrac = x_u/x_l + mtest = (x_u/x_l != 1) & (at_x != x_u) + ain[mtest] = (y_u[mtest] - y_l[mtest])/np.log(mfrac[mtest]) + loginterp[mtest] = ain[mtest]*np.log(at_x[mtest]/x_u[mtest])+y_u[mtest] + mtest = (x_u/x_l == 1) # below lowest layer, make lin interp + loginterp[mtest] = at_x[mtest]/x_u[mtest] * (y_u[mtest]) + mtest = (at_x == x_u) # just use y_u + loginterp[mtest] = y_u[mtest] + return loginterp + + def height_corr_sub(self, u_a, heightg, mask, onemfrac): + """Function to calculate the additive height correction. + + Parameters: + ---------- + u_a: 2D array (float) + outer velocity, e.g. velocity at h_ref_orig + heightg: 1D or 3D array + heights above orography + onemfrac: currently, scalar = 1. + In principle, it is a function of position and height, e.g. 
+ a 3D array (float) + + Returns: + ------- + hc_add: 3D array (float) + additive height correction to wind speed + + Comments: + The height correction is a disturbance of the flow that + decays exponentially with height. The larger the vertical + offset h_at0 (the higher the unresolved hill), the larger + is the disturbance. + + The more smooth the distrubance (the larger the horizontal + scale of the disturbance), the smaller the height + correction (hence, a larger wavenumber results in a larger + disturbance). + hc_add = exp(-height*wavenumber)*u(href)*h_at_0*k + + """ + (xdim, ydim) = u_a.shape + if heightg.ndim == 1: + zdim = heightg.shape[0] + heightg = heightg.reshape((1, 1, zdim)) + elif heightg.ndim == 3: + zdim = heightg.shape[2] + ml2 = self.h_at0*self.wavenum + expon = np.ones([xdim, ydim, zdim]) + mult = (self.wavenum).reshape(self.wavenum.shape+(1,))*heightg + expon[mult > 0.0001] = np.exp(-mult[mult > 0.0001]) + hc_add = ( + expon*u_a.reshape(u_a.shape+(1,)) * + ml2.reshape(ml2.shape+(1,))*onemfrac) + hc_add[~mask, :] = 0 + return hc_add + + def delta_height(self): + """Function to calculate pp-grid diff from model grid. + + Calculate the difference between pp-grid height and model + grid height. + + Returns: + ------- + deltZ: 2D np.array (float) + height difference, ppgrid-model + + """ + delt_z = np.ones(self.pporo.shape) * RMDI + delt_z[self.hcmask] = self.pporo[self.hcmask]-self.modoro[self.hcmask] + return delt_z + + def do_rc_hc_all(self, hgrid, uorig): + """Function to call HC and RC (height and roughness corrections). + + Parameters: + ---------- + hgrid: 1D or 3D array (float) + height grid of wind input + uorig: 3D array (float) + wind speed on these levels + + Returns: + ------- + result: 3D array + sum of unew: 3D array (float) RC corrected windspeed + on levels HC: 3D array (float) HC additional part + + """ + if hgrid.ndim == 3: + condition1 = ((hgrid == RMDI).any(axis=2)) + self.hcmask[condition1] = False + self.rcmask[condition1] = False + mask_rc = np.copy(self.rcmask) + mask_rc[(uorig == RMDI).any(axis=2)] = False + mask_hc = np.copy(self.hcmask) + mask_hc[(uorig == RMDI).any(axis=2)] = False + if not self.l_no_winddownscale: + unew = self.roughness_correction_sub(hgrid, uorig, mask_rc) + else: + unew = uorig + uhref_orig = self.calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc) + mask_hc[uhref_orig <= 0] = False + onemfrac = 1.0 + # onemfrac = 1.0 - BfuncFrac(nx,ny,nz,heightvec,z_0,waveno, Ustar, UI) + hc_add = self.height_corr_sub(uhref_orig, hgrid, mask_hc, onemfrac) + result = unew + hc_add + result[result < 0.] = 0 # HC can be negative if pporo Date: Wed, 19 Apr 2017 09:00:03 +0100 Subject: [PATCH 0002/1367] Minor style corrections --- ...st_wind_downscaling_roughnesscorrection.py | 59 +++++++++---------- lib/improver/wind_downscaling.py | 2 + 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py index ac702b8a22..31f9f58be8 100644 --- a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py +++ b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py @@ -30,8 +30,8 @@ # POSSIBILITY OF SUCH DAMAGE. 
"""Unit tests for plugin wind_downscaling.RoughnessCorrection.""" -import unittest +import unittest from cf_units import Unit import iris @@ -125,18 +125,18 @@ def __init__(self, nx_ny=3, AoS=None, Sigma=None, z_0=0.2, pporog=None, Parameters ---------- - nxny: a scalar or an np.array([x,y]) - Sets dimension for tests. - AoS: float or 1D or 2D array - Silhouette roughness field - Sigma: float or 1D or 2D array - Standard deviation field of height in grid cell - z_0: float or 1D or 2D array - Vegetative roughness field - pporog: float or 1D or 2D array - Unsmoothed orography field on post-processing grid - modelorog: float or 1D or 2D array - Model orography field on post-processing grid + nxny: a scalar or an np.array([x,y]) + Sets dimension for tests. + AoS: float or 1D or 2D array + Silhouette roughness field + Sigma: float or 1D or 2D array + Standard deviation field of height in grid cell + z_0: float or 1D or 2D array + Vegetative roughness field + pporog: float or 1D or 2D array + Unsmoothed orography field on post-processing grid + modelorog: float or 1D or 2D array + Model orography field on post-processing grid """ if isinstance(nx_ny, int): @@ -257,19 +257,18 @@ def __init__(self, AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250., Parameters ---------- - - AoS: float - Silhouette roughness field - Sigma: float - Standard deviation field of height in grid cell - z_0: float - Vegetative roughness field - pporog: float - Unsmoothed orography on post-processing grid - modelorog: float - Model orography on post-processing grid - heightlevels: 1D np.array - Height level array + AoS: float + Silhouette roughness field + Sigma: float + Standard deviation field of height in grid cell + z_0: float + Vegetative roughness field + pporog: float + Unsmoothed orography on post-processing grid + modelorog: float + Model orography on post-processing grid + heightlevels: 1D np.array + Height level array """ self.w_cube = None @@ -297,10 +296,10 @@ def test_hc_rc(self, wind, height=None): Parameters ---------- - wind: 1 or 2D array - Array of wind speeds - height: float, default None - Value for height in metres for zeroth slice of wind + wind: 1 or 2D array + Array of wind speeds + height: float, default None + Value for height in metres for zeroth slice of wind """ wind = np.array(wind) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index 53de10fc15..bcca68597d 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -30,6 +30,7 @@ # POSSIBILITY OF SUCH DAMAGE. 
"""Module containing wind downscaling plugins.""" + import copy import itertools @@ -39,6 +40,7 @@ from improver.constants import RMDI + # Scale parameter to determine reference height ABSOLUTE_CORRECTION_TOL = 0.04 From 66ce303f33c71159cc25eb83888686feaa085071 Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Wed, 19 Apr 2017 20:52:20 +0100 Subject: [PATCH 0003/1367] Fix pep8 and pylint numpy skip --- .travis.yml | 2 +- lib/improver/wind_downscaling.py | 17 +++++++++-------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/.travis.yml b/.travis.yml index 4401251fea..183568455d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -67,5 +67,5 @@ script: - python -c "import iris" - cd lib - pep8 improver - - pylint -E improver + - pylint --extension-pkg-whitelist=numpy -E improver - python -m unittest discover diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index bcca68597d..86f23d05ff 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -355,8 +355,9 @@ def roughness_correction_sub(self, hgrid, uold, mask): cond = hgrid < (self.h_ref).reshape(self.h_ref.shape+(1,)) unew[cond] = ( ustar.reshape(ustar.shape+(1,))*np.ones(unew.shape) - )[cond]*(np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) - * np.ones(unew.shape)))[cond])/VONKARMAN + )[cond] * ( + np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) * + np.ones(unew.shape)))[cond])/VONKARMAN return unew def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): @@ -392,11 +393,11 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): h_in, 0.0), axis=2) if h_in.ndim == 3: - hup = h_in.take(upidx.flatten()+np.arange(0, upidx.size - * h_in.shape[2], + hup = h_in.take(upidx.flatten()+np.arange(0, upidx.size * + h_in.shape[2], h_in.shape[2])) - hlow = h_in.take(loidx.flatten()+np.arange(0, loidx.size - * h_in.shape[2], + hlow = h_in.take(loidx.flatten()+np.arange(0, loidx.size * + h_in.shape[2], h_in.shape[2])) elif h_in.ndim == 1: hup = h_in[upidx].flatten() @@ -445,8 +446,8 @@ def interp1d(xup, xlow, at_x, yup, ylow): diffs = (xup - xlow) interp[diffs != 0] = ( ylow[diffs != 0]+((at_x[diffs != 0]-xlow[diffs != 0]) / - diffs[diffs != 0]*(yup[diffs != 0] - - ylow[diffs != 0]))) + diffs[diffs != 0]*(yup[diffs != 0] - + ylow[diffs != 0]))) interp[diffs == 0] = at_x[diffs == 0]/xup[diffs == 0]*(yup[diffs == 0]) return interp From 42ded96a2b48b68a60162b66ff1fee54c307cc6c Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Thu, 4 May 2017 09:09:44 +0100 Subject: [PATCH 0004/1367] First round of review feedback --- lib/improver/wind_downscaling.py | 195 +++++++++++++++++-------------- 1 file changed, 109 insertions(+), 86 deletions(-) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index 86f23d05ff..24aa59f278 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -92,16 +92,26 @@ def __init__(self, u_href, h_ref, z_0, mask): def calc_ustar(self): """Function to calculate the friction velocity. + ustar = K * u_href / ln(h_ref / z_0) + + where ustar is the friction velocity, K is Von Karman's + constant, u_ref is the wind speed at the reference height, + h_ref is the reference height and z_0 is the vegetative + roughness length. 
+ Returns: -------- ustar: 2D array (float) friction velocity """ - ustar = np.ones(self.u_href.shape) * RMDI - ustar[self.mask] = VONKARMAN * (self.u_href[self.mask]/np.log - (self.h_ref[self.mask] / - self.z_0[self.mask])) + ustar = np.full(self.u_href.shape, RMDI) + ustar[self.mask] = ( + VONKARMAN * ( + self.u_href[self.mask] / + np.log(self.h_ref[self.mask] / self.z_0[self.mask]) + ) + ) return ustar @@ -111,7 +121,8 @@ class RoughnessCorrectionUtilities(object): This holds functions to calculate the roughness and height corrections given the ancil files: - * standard deviation of hight in grid cell as sigma (model grid on pp grid) + * standard deviation of height in grid cell as sigma (model grid on + pp grid) * Silhouette roughness as a_over_s (model grid on pp grid) * vegetative roughness length z_0 (model grid on pp grid) * post-processing grid orography pporo @@ -132,7 +143,9 @@ def __init__(self, a_over_s, sigma, z_0, pporo, modoro, ppres, modres): ---------- a_over_s: 2D array (float) Silhouette roughness field, dimensionless ancillary data, - calculated according to Robinson (2008) + calculated according to Robinson, D. (2008) - Ancillary + file creation for the UM, Unified Model Documentation Paper + 73. sigma: 2D array (float) Standard deviation field of height in the grid cell, units of length @@ -150,26 +163,22 @@ def __init__(self, a_over_s, sigma, z_0, pporo, modoro, ppres, modres): """ self.a_over_s = a_over_s self.z_0 = z_0 - if z_0 is None: - self.l_no_winddownscale = True - else: - self.l_no_winddownscale = False self.pporo = pporo self.modoro = modoro self.h_over_2 = self.sigma2hover2(sigma) # half peak to trough height - self.hcmask, self.rcmask = self.setmask() # HC mask, RC mask - if not self.l_no_winddownscale: + self.hcmask, self.rcmask = self._setmask() # HC mask, RC mask + if self.z_0 is not None: self.z_0[z_0 <= 0] = Z0M_SEA self.dx_min = ppres/2. # scales smaller than this not resolved in pp # the original code had hardcoded 500 self.dx_max = 3.*modres # scales larger than this resolved in model # the original code had hardcoded 4000 - self.wavenum = self.calc_wav() # k = 2*pi / L - self.h_ref = self.calc_h_ref() - self.refinemask() # HC mask needs to be updated for missing orography - self.h_at0 = self.delta_height() # pp orography - model orography + self.wavenum = self._calc_wav() # k = 2*pi / L + self.h_ref = self._calc_h_ref() + self._refinemask() # HC mask needs to be updated for missing orography + self.h_at0 = self._delta_height() # pp orography - model orography - def refinemask(self): + def _refinemask(self): """Remask over RMDI and NaN orography. The mask for HC needs to be False where either of the @@ -184,7 +193,7 @@ def refinemask(self): self.hcmask[np.isnan(self.pporo)] = False self.hcmask[np.isnan(self.modoro)] = False - def setmask(self): + def _setmask(self): """Create a ~land-sea mask. Create a mask that is basically a land-sea mask: @@ -206,7 +215,7 @@ def setmask(self): hcmask[np.isnan(self.h_over_2)] = False hcmask[np.isnan(self.a_over_s)] = False rcmask = np.copy(hcmask) - if not self.l_no_winddownscale: + if self.z_0 is not None: rcmask[self.z_0 <= 0] = False rcmask[np.isnan(self.z_0)] = False return hcmask, rcmask @@ -239,11 +248,11 @@ def sigma2hover2(sigma): RMDI. """ - h_o_2 = np.ones(sigma.shape) * RMDI + h_o_2 = np.full(sigma.shape, RMDI) h_o_2[sigma > 0] = sigma[sigma > 0] * np.sqrt(2.0) return h_o_2 - def calc_wav(self): + def _calc_wav(self): """Calculate wavenumber k of typical orographic lengthscale. 
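+
+        (The result is additionally capped to the range
+        [pi/dx_max, pi/dx_min], so that scales not resolved on the
+        post-processing grid, or fully resolved by the model, are
+        excluded; see the body below.)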
Function to calculate wavenumber k of typical orographic @@ -276,15 +285,17 @@ def calc_wav(self): wavenumber in units of inverse units of supplied h_over_2. """ - wavn = np.ones(self.a_over_s.shape) * RMDI - wavn[self.hcmask] = (self.a_over_s[self.hcmask] / - self.h_over_2[self.hcmask]*np.pi) + wavn = np.full(self.a_over_s.shape, RMDI) + wavn[self.hcmask] = ( + (self.a_over_s[self.hcmask] * np.pi) / + self.h_over_2[self.hcmask] + ) wavn[wavn > np.pi/self.dx_min] = np.pi/self.dx_min wavn[self.h_over_2 == 0] = RMDI wavn[abs(wavn) < np.pi/self.dx_max] = np.pi/self.dx_max return wavn - def calc_h_ref(self): + def _calc_h_ref(self): """Calculate the reference height for roughness correction. The reference height below which the flow is in equilibrium @@ -296,6 +307,9 @@ def calc_h_ref(self): epsilon (ABSOLUTE_CORRECTION_TOL). The factor alpha implements eq. 1.3 in Clark (2009): UK Climatology - Wind Screening Tool. See also Vosper (2009) for a motivation. + For a freely available external reference, see the Virtual Met + Mast Version 1 Methodology and Verification paper under + www.thecrownestate.co.uk. alpha is the log of scale parameter to determine reference height which is currently set to 0.04 (this corresponds to @@ -308,20 +322,23 @@ def calc_h_ref(self): """ alpha = -np.log(ABSOLUTE_CORRECTION_TOL) - aparam = np.ones(self.wavenum.shape) * RMDI - h_ref = np.ones(self.wavenum.shape) * RMDI - aparam[self.hcmask] = alpha + np.log(self.wavenum[self.hcmask] * - self.h_over_2[self.hcmask]) - aparam[aparam > 1.0] = 1.0 - aparam[aparam < 0.0] = 0.0 - h_ref[self.hcmask] = aparam[self.hcmask] / self.wavenum[self.hcmask] + tunable_param = np.full(self.wavenum.shape, RMDI) + h_ref = np.full(self.wavenum.shape, RMDI) + tunable_param[self.hcmask] = ( + alpha + np.log(self.wavenum[self.hcmask] * + self.h_over_2[self.hcmask]) + ) + tunable_param[tunable_param > 1.0] = 1.0 + tunable_param[tunable_param < 0.0] = 0.0 + h_ref[self.hcmask] = ( + tunable_param[self.hcmask] / self.wavenum[self.hcmask]) h_ref[h_ref < 1.0] = 1.0 - h_ref = np.minimum(h_ref, HREF_SCALE*self.h_over_2) + h_ref = np.minimum(h_ref, HREF_SCALE * self.h_over_2) h_ref[h_ref < 1.0] = 1.0 h_ref[~self.hcmask] = 0.0 return h_ref - def roughness_correction_sub(self, hgrid, uold, mask): + def calc_roughness_correction(self, hgrid, uold, mask): """Function to perform the roughness correction. 
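+
+        Below h_ref the corrected profile follows the neutral log
+        law, u(z) = (ustar / K) * ln(z / z_0), which is a sketch of
+        the assignment to unew in the body below.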
Parameters: @@ -339,28 +356,28 @@ def roughness_correction_sub(self, hgrid, uold, mask): Comments: Replace the windspeed profile below the reference height with one - that increases logarithmic with height, bound by the original + that increases logarithmically with height, bound by the original velocity uhref at the reference height h_ref and by a 0 velocity at the vegetative roughness height z_0 """ - uhref = self.calc_u_at_h(uold, hgrid, self.h_ref, mask) + uhref = self._calc_u_at_h(uold, hgrid, self.h_ref, mask) if hgrid.ndim == 1: - hgrid = hgrid.reshape((1, 1, )+(hgrid.shape[0],)) + hgrid = hgrid[np.newaxis, np.newaxis, :] ustar = FrictionVelocity(uhref, self.h_ref, self.z_0, mask).calc_ustar() unew = np.copy(uold) mhref = self.h_ref mhref[~mask] = RMDI - cond = hgrid < (self.h_ref).reshape(self.h_ref.shape+(1,)) + cond = hgrid < self.h_ref[:, np.newaxis] unew[cond] = ( - ustar.reshape(ustar.shape+(1,))*np.ones(unew.shape) + ustar[:, np.newaxis]*np.ones(unew.shape) )[cond] * ( np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) * np.ones(unew.shape)))[cond])/VONKARMAN return unew - def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): + def _calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): """Function to interpolate u_in on h_in at hhere. Parameters: @@ -371,7 +388,9 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): height layer array hhere: 2D array (float) height grid to interpolate at - (dolog: scalar (logial) + mask: 2D array (logical) + mask the final result for uath + (dolog: scalar (logical) if True, log interpolation, default False) Returns: @@ -387,9 +406,9 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): # Ignores the height at the position where u_in is RMDI,"hops over" hhere = np.ma.masked_less(hhere, 0.0) - upidx = np.argmax(h_in > hhere.reshape(hhere.shape+(1,)), axis=2) + upidx = np.argmax(h_in > hhere[:, np.newaxis], axis=2) # loidx = np.maximum(upidx-1, 0) #if RMDI, need below - loidx = np.argmin(np.ma.masked_less(hhere.reshape(hhere.shape+(1,)) - + loidx = np.argmin(np.ma.masked_less(hhere[:, np.newaxis] - h_in, 0.0), axis=2) if h_in.ndim == 3: @@ -409,26 +428,26 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): mask = mask.flatten() uath = np.full(mask.shape, RMDI, dtype=float) if dolog: - uath[mask] = self.loginterpol(hup[mask], hlow[mask], - hhere.flatten()[mask], - uup[mask], ulow[mask]) + uath[mask] = self._interpolate_log(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) else: - uath[mask] = self.interp1d(hup[mask], hlow[mask], - hhere.flatten()[mask], - uup[mask], ulow[mask]) + uath[mask] = self._interpolate_1d(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) uath = np.reshape(uath, hhere.shape) return uath @staticmethod - def interp1d(xup, xlow, at_x, yup, ylow): + def _interpolate_1d(xup, xlow, at_x, yup, ylow): """Simple 1D linear interpolation for 2D grid inputs level. Parameters: ---------- - xlow: 2D np.array (float) - lower x-bins xup: 2D np.array (float) upper x-bins + xlow: 2D np.array (float) + lower x-bins at_x: 2D np.array (float) x values to interpolate y at yup: 2D np.array(float) @@ -452,42 +471,42 @@ def interp1d(xup, xlow, at_x, yup, ylow): return interp @staticmethod - def loginterpol(x_u, x_l, at_x, y_u, y_l): + def _interpolate_log(xup, xlow, at_x, yup, ylow): """Simple 1D log interpolation y(x), except if lowest layer is ground level. 
Parameters: ---------- - x_l: 2D np.array (float) - lower x-bins - x_u: 2D np.array (float) + xup: 2D np.array (float) upper x-bins + xlow: 2D np.array (float) + lower x-bins at_x: 2D np.array (float) x values to interpolate y at - y_u: 2D np.array (float) - y(x_u) - y_l: 2D np.array (float) - y(x_l) + yup: 2D np.array(float) + y(xup) + ylow: 2D np.array (float) + y(xlow) Returns: ------- loginterp: 2D np.array (float) - y(at_x) assuming a log function between x_l and x_u + y(at_x) assuming a log function between xlow and xup """ - ain = np.full(x_u.shape, RMDI, dtype=float) - loginterp = np.full(x_u.shape, RMDI, dtype=float) - mfrac = x_u/x_l - mtest = (x_u/x_l != 1) & (at_x != x_u) - ain[mtest] = (y_u[mtest] - y_l[mtest])/np.log(mfrac[mtest]) - loginterp[mtest] = ain[mtest]*np.log(at_x[mtest]/x_u[mtest])+y_u[mtest] - mtest = (x_u/x_l == 1) # below lowest layer, make lin interp - loginterp[mtest] = at_x[mtest]/x_u[mtest] * (y_u[mtest]) - mtest = (at_x == x_u) # just use y_u - loginterp[mtest] = y_u[mtest] + ain = np.full(xup.shape, RMDI, dtype=float) + loginterp = np.full(xup.shape, RMDI, dtype=float) + mfrac = xup/xlow + mtest = (xup/xlow != 1) & (at_x != xup) + ain[mtest] = (yup[mtest] - ylow[mtest])/np.log(mfrac[mtest]) + loginterp[mtest] = ain[mtest]*np.log(at_x[mtest]/xup[mtest])+yup[mtest] + mtest = (xup/xlow == 1) # below lowest layer, make lin interp + loginterp[mtest] = at_x[mtest]/xup[mtest] * (yup[mtest]) + mtest = (at_x == xup) # just use yup + loginterp[mtest] = yup[mtest] return loginterp - def height_corr_sub(self, u_a, heightg, mask, onemfrac): + def _calc_height_corr(self, u_a, heightg, mask, onemfrac): """Function to calculate the additive height correction. Parameters: @@ -496,6 +515,8 @@ def height_corr_sub(self, u_a, heightg, mask, onemfrac): outer velocity, e.g. velocity at h_ref_orig heightg: 1D or 3D array heights above orography + mask: 3D array(logical) + Masks the hc_add result onemfrac: currently, scalar = 1. In principle, it is a function of position and height, e.g. a 3D array (float) @@ -511,30 +532,32 @@ def height_corr_sub(self, u_a, heightg, mask, onemfrac): offset h_at0 (the higher the unresolved hill), the larger is the disturbance. - The more smooth the distrubance (the larger the horizontal + The more smooth the disturbance (the larger the horizontal scale of the disturbance), the smaller the height correction (hence, a larger wavenumber results in a larger disturbance). - hc_add = exp(-height*wavenumber)*u(href)*h_at_0*k + hc_add = exp(-height*wavenumber)*u(href)*h_at_0*wavenumber + + A final factor of 1 is assumed and omitted for the Bessel + function term. """ (xdim, ydim) = u_a.shape if heightg.ndim == 1: zdim = heightg.shape[0] - heightg = heightg.reshape((1, 1, zdim)) + heightg = heightg[np.newaxis, np.newaxis, :] elif heightg.ndim == 3: zdim = heightg.shape[2] ml2 = self.h_at0*self.wavenum expon = np.ones([xdim, ydim, zdim]) - mult = (self.wavenum).reshape(self.wavenum.shape+(1,))*heightg + mult = self.wavenum[:, np.newaxis]*heightg expon[mult > 0.0001] = np.exp(-mult[mult > 0.0001]) hc_add = ( - expon*u_a.reshape(u_a.shape+(1,)) * - ml2.reshape(ml2.shape+(1,))*onemfrac) + expon*u_a[:, np.newaxis] * ml2[:, np.newaxis] * onemfrac) hc_add[~mask, :] = 0 return hc_add - def delta_height(self): + def _delta_height(self): """Function to calculate pp-grid diff from model grid. 
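# For orientation, the additive height correction described above follows
#     hc_add(z) = exp(-k * z) * u(1/k) * h_at0 * k
# so, for example, with k = 0.0314 per metre, h_at0 = 20 m and u = 10 m/s:
#
#     import numpy as np
#     k, h_at0, u = 0.0314, 20.0, 10.0
#     z = np.array([0.0, 1.0 / k])
#     print(np.exp(-k * z) * u * h_at0 * k)   # ~[6.28, 2.31] m/s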
Calculate the difference between pp-grid height and model
@@ -542,7 +565,7 @@ def delta_height(self):
 
         Returns:
         -------
-        deltZ: 2D np.array (float)
+        delt_z: 2D np.array (float)
             height difference, ppgrid-model
 
         """
@@ -550,7 +573,7 @@ def delta_height(self):
         delt_z[self.hcmask] = self.pporo[self.hcmask]-self.modoro[self.hcmask]
         return delt_z
 
-    def do_rc_hc_all(self, hgrid, uorig):
+    def _do_rc_hc_all(self, hgrid, uorig):
         """Function to call HC and RC (height and roughness corrections).
 
         Parameters:
@@ -576,14 +599,14 @@ def _do_rc_hc_all(self, hgrid, uorig):
         mask_hc = np.copy(self.hcmask)
         mask_hc[(uorig == RMDI).any(axis=2)] = False
         if not self.l_no_winddownscale:
-            unew = self.roughness_correction_sub(hgrid, uorig, mask_rc)
+            unew = self.calc_roughness_correction(hgrid, uorig, mask_rc)
         else:
             unew = uorig
-        uhref_orig = self.calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc)
+        uhref_orig = self._calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc)
         mask_hc[uhref_orig <= 0] = False
         onemfrac = 1.0
         # onemfrac = 1.0 - BfuncFrac(nx,ny,nz,heightvec,z_0,waveno, Ustar, UI)
-        hc_add = self.height_corr_sub(uhref_orig, hgrid, mask_hc, onemfrac)
+        hc_add = self._calc_height_corr(uhref_orig, hgrid, mask_hc, onemfrac)
         result = unew + hc_add
         result[result < 0.] = 0  # HC can be negative if pporo < modoro

Date: Thu, 4 May 2017 16:27:38 +0100
Subject: [PATCH 0005/1367] Fix l_no_downscale
---
 lib/improver/wind_downscaling.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py
index 24aa59f278..e4802e584b 100644
--- a/lib/improver/wind_downscaling.py
+++ b/lib/improver/wind_downscaling.py
@@ -598,7 +598,7 @@ def _do_rc_hc_all(self, hgrid, uorig):
         mask_rc[(uorig == RMDI).any(axis=2)] = False
         mask_hc = np.copy(self.hcmask)
         mask_hc[(uorig == RMDI).any(axis=2)] = False
-        if not self.l_no_winddownscale:
+        if self.z_0 is not None:
             unew = self.calc_roughness_correction(hgrid, uorig, mask_rc)
         else:
             unew = uorig

From 0a4cfb0e588ce182361e99a2a09cf4d4fb9c370f Mon Sep 17 00:00:00 2001
From: Ben Fitzpatrick
Date: Mon, 27 Mar 2017 13:23:42 +0100
Subject: [PATCH 0006/1367] Add CLI example
---
 .gitignore          |  3 ++
 bin/improver        | 77 +++++++++++++++++++++++++++++++++++++++++++++
 bin/improver-nbhood | 59 ++++++++++++++++++++++++++++++++++
 etc/VERSION         |  1 +
 4 files changed, 140 insertions(+)
 create mode 100755 bin/improver
 create mode 100755 bin/improver-nbhood
 create mode 100644 etc/VERSION

diff --git a/.gitignore b/.gitignore
index bb5d5b9214..fc9ec3a569 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,3 +14,6 @@
 \#*
 \.\#*
 *.swp
+
+# Site-specific setup
+etc/site-init
diff --git a/bin/improver b/bin/improver
new file mode 100755
index 0000000000..4c03a584de
--- /dev/null
+++ b/bin/improver
@@ -0,0 +1,77 @@
+#!/bin/bash
+#------------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+#------------------------------------------------------------------------------
+# NAME
+#     improver - IMPROVER post-processing and verification operations
+#
+# SYNOPSIS
+#     improver OPERATION [OPTIONS] [ARGS...] # Invoke an IMPROVER operation
+#     improver help               # Generic help across operations
+#     improver help OPERATION     # Specific help for a particular operation
+#     improver version            # Print out version information
+#
+# DESCRIPTION
+#     Launch particular operations for post-processing or verification of
+#     meteorological data.
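#     For example, "improver nbhood --radius-in-km 20 in.nc out.nc" is
#     dispatched to the "improver-nbhood" executable found on the PATH
#     set up below; the operation name is simply appended to "improver-".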
+#------------------------------------------------------------------------------ + +set -eu +export IMPROVER_DIR="$(cd $(dirname $0)/../ && pwd -P)" + + +# List all improver subcommands or operations. +get_operations() { + cd "$IMPROVER_DIR/bin/" + ls improver-* | sort | sed "s/^improver-//" +} + +# Print generic or operation specific help. +print_help() { + OPER=${1:-} + if [[ -n "$OPER" ]] && [[ $OPER != all ]]; then + # Operation-specific help. + "$IMPROVER_DIR/bin/improver" "$OPER" --help + else + # General help. + sed -n '/^# NAME/,/^#---/{/^#\-/d; s/^#//; s/^ //; p}' "$0" + echo + echo "OPERATIONS" + echo -n " improver " + get_operations | sed "s/ /\n improver /g" + fi +} + +# Print the version. +print_version() { + cat "$IMPROVER_DIR/etc/VERSION" +} + + +HELP_TARGET=all +if (($# == 0)); then + print_help + exit 0 +fi +if [[ $1 == help ]] || [[ $1 == --help ]]; then + print_help ${2:-} + exit 0 +fi +if [[ $1 == version ]] || [[ $1 == --version ]]; then + print_version + exit 0 +fi + +OPER=$1 +shift + +# Apply site-specific setup if necessary. +if [[ -f "$IMPROVER_DIR/etc/site-init" ]]; then + . "$IMPROVER_DIR/etc/site-init" +fi + +# Put our library and scripts in the paths. +export PYTHONPATH="$IMPROVER_DIR/lib/:$PYTHONPATH" +export PATH="$IMPROVER_DIR/bin/:$PATH" + +exec improver-$OPER "$@" diff --git a/bin/improver-nbhood b/bin/improver-nbhood new file mode 100755 index 0000000000..8ac68f2266 --- /dev/null +++ b/bin/improver-nbhood @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
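# An example invocation, assuming the launcher above is on the PATH and
# using the arguments defined in main() below:
#
#     improver nbhood --radius-in-km 20 input.nc output.nc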
+"""Script to run neighbourhood processing.""" + +import argparse + +import iris + +from improver.nbhood import BasicNeighbourhoodProcessing + + +def main(): + """Load in arguments and get going.""" + parser = argparse.ArgumentParser( + description='Do some basic neighbourhood processing') + parser.add_argument('--radius-in-km', metavar='RADIUS', type=float, + help='The kernel radius for neighbourhood processing') + parser.add_argument('input_filepath', metavar='INPUT_FILE', + help='A path to an input NetCDF file to be processed') + parser.add_argument('output_filepath', metavar='OUTPUT_FILE', + help='The output path for the processed NetCDF') + args = parser.parse_args() + cube = iris.load_cube(args.input_filepath) + result = BasicNeighbourhoodProcessing(args.radius_in_km).process(cube) + print 'Writing output:', args.output_filepath + iris.save(result, args.output_filepath, unlimited_dimensions=[]) + + +if __name__ == "__main__": + main() diff --git a/etc/VERSION b/etc/VERSION new file mode 100644 index 0000000000..9ef3eae8d1 --- /dev/null +++ b/etc/VERSION @@ -0,0 +1 @@ +IMPROVER 2017.03.0alpha From fa5d4fdeadc6f8dbb19759cd3e5ce281a31c2337 Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Tue, 4 Apr 2017 21:31:41 +0100 Subject: [PATCH 0007/1367] Add BATS testing --- ACKNOWLEDGEMENTS.md | 3 + bin/improver | 2 +- bin/improver-nbhood | 2 +- bin/improver-tests | 24 ++ lib/improver/nbhood.py | 2 +- licences/MIT-BATS | 20 ++ tests/bin/bats | 142 ++++++++++ tests/bin/bats-exec-suite | 55 ++++ tests/bin/bats-exec-test | 346 ++++++++++++++++++++++++ tests/bin/bats-format-tap-stream | 165 +++++++++++ tests/bin/bats-preprocess | 52 ++++ tests/improver-nbhood/.00-null.bats.swp | Bin 0 -> 12288 bytes tests/improver-nbhood/00-null.bats | 9 + 13 files changed, 819 insertions(+), 3 deletions(-) create mode 100755 bin/improver-tests create mode 100644 licences/MIT-BATS create mode 100755 tests/bin/bats create mode 100755 tests/bin/bats-exec-suite create mode 100755 tests/bin/bats-exec-test create mode 100755 tests/bin/bats-format-tap-stream create mode 100755 tests/bin/bats-preprocess create mode 100644 tests/improver-nbhood/.00-null.bats.swp create mode 100644 tests/improver-nbhood/00-null.bats diff --git a/ACKNOWLEDGEMENTS.md b/ACKNOWLEDGEMENTS.md index ca91ad68b2..e19d093a0b 100644 --- a/ACKNOWLEDGEMENTS.md +++ b/ACKNOWLEDGEMENTS.md @@ -3,3 +3,6 @@ Credit for external pieces of work: Iris (https://github.com/SciTools/iris), LGPL: - .travis.yml, derived from Iris travis setup - .gitignore used as basis for ours + +BATS (https://github.com/sstephenson/bats), MIT-style: + - tests/bin/bats\*, unaltered diff --git a/bin/improver b/bin/improver index 4c03a584de..2bb0f626a5 100755 --- a/bin/improver +++ b/bin/improver @@ -17,8 +17,8 @@ #------------------------------------------------------------------------------ set -eu -export IMPROVER_DIR="$(cd $(dirname $0)/../ && pwd -P)" +export IMPROVER_DIR="$(cd $(dirname $0)/../ && pwd -P)" # List all improver subcommands or operations. 
get_operations() { diff --git a/bin/improver-nbhood b/bin/improver-nbhood index 8ac68f2266..878ac64034 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -33,7 +33,7 @@ import argparse -import iris +#import iris from improver.nbhood import BasicNeighbourhoodProcessing diff --git a/bin/improver-tests b/bin/improver-tests new file mode 100755 index 0000000000..75408edbc5 --- /dev/null +++ b/bin/improver-tests @@ -0,0 +1,24 @@ +#!/bin/bash +#------------------------------------------------------------------------------ +# (C) British Crown Copyright 2017 Met Office. +#------------------------------------------------------------------------------ +# NAME +# improver tests - Run IMPROVER self-tests +# +# SYNOPSIS +# improver tests +# +# DESCRIPTION +# Launch all IMPROVER self-tests. +#------------------------------------------------------------------------------ + +set -eu + +PATH="$IMPROVER_DIR/tests/bin/:$PATH" +# Put our library and scripts in the paths. +if [[ ${1:-} != '--debug' ]] && type prove &>/dev/null; then + prove -j $(nproc) -r -e "bats --tap" \ + --ext ".bats" "$IMPROVER_DIR/tests/" +else + bats $(find "$IMPROVER_DIR/tests/" -name "*.bats") +fi diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py index 5475fac41b..4e66c81713 100644 --- a/lib/improver/nbhood.py +++ b/lib/improver/nbhood.py @@ -31,7 +31,7 @@ """Module containing neighbourhood processing utilities.""" -import iris +#import iris import numpy as np import scipy.ndimage.filters diff --git a/licences/MIT-BATS b/licences/MIT-BATS new file mode 100644 index 0000000000..bac4eb29cc --- /dev/null +++ b/licences/MIT-BATS @@ -0,0 +1,20 @@ +Copyright (c) 2014 Sam Stephenson + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/tests/bin/bats b/tests/bin/bats new file mode 100755 index 0000000000..71f392f757 --- /dev/null +++ b/tests/bin/bats @@ -0,0 +1,142 @@ +#!/usr/bin/env bash +set -e + +version() { + echo "Bats 0.4.0" +} + +usage() { + version + echo "Usage: bats [-c] [-p | -t] [ ...]" +} + +help() { + usage + echo + echo " is the path to a Bats test file, or the path to a directory" + echo " containing Bats test files." 
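  # A minimal Bats test for orientation; "run" (defined in bats-exec-test
  # below) captures $status and $output for the assertions:
  #
  #     @test "true succeeds" {
  #       run true
  #       [ "$status" -eq 0 ]
  #     }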
+ echo + echo " -c, --count Count the number of test cases without running any tests" + echo " -h, --help Display this help message" + echo " -p, --pretty Show results in pretty format (default for terminals)" + echo " -t, --tap Show results in TAP format" + echo " -v, --version Display the version number" + echo + echo " For more information, see https://github.com/sstephenson/bats" + echo +} + +resolve_link() { + $(type -p greadlink readlink | head -1) "$1" +} + +abs_dirname() { + local cwd="$(pwd)" + local path="$1" + + while [ -n "$path" ]; do + cd "${path%/*}" + local name="${path##*/}" + path="$(resolve_link "$name" || true)" + done + + pwd + cd "$cwd" +} + +expand_path() { + { cd "$(dirname "$1")" 2>/dev/null + local dirname="$PWD" + cd "$OLDPWD" + echo "$dirname/$(basename "$1")" + } || echo "$1" +} + +BATS_LIBEXEC="$(abs_dirname "$0")" +export BATS_PREFIX="$(abs_dirname "$BATS_LIBEXEC")" +export BATS_CWD="$(abs_dirname .)" +export PATH="$BATS_LIBEXEC:$PATH" + +options=() +arguments=() +for arg in "$@"; do + if [ "${arg:0:1}" = "-" ]; then + if [ "${arg:1:1}" = "-" ]; then + options[${#options[*]}]="${arg:2}" + else + index=1 + while option="${arg:$index:1}"; do + [ -n "$option" ] || break + options[${#options[*]}]="$option" + let index+=1 + done + fi + else + arguments[${#arguments[*]}]="$arg" + fi +done + +unset count_flag pretty +[ -t 0 ] && [ -t 1 ] && pretty="1" +[ -n "$CI" ] && pretty="" + +for option in "${options[@]}"; do + case "$option" in + "h" | "help" ) + help + exit 0 + ;; + "v" | "version" ) + version + exit 0 + ;; + "c" | "count" ) + count_flag="-c" + ;; + "t" | "tap" ) + pretty="" + ;; + "p" | "pretty" ) + pretty="1" + ;; + * ) + usage >&2 + exit 1 + ;; + esac +done + +if [ "${#arguments[@]}" -eq 0 ]; then + usage >&2 + exit 1 +fi + +filenames=() +for filename in "${arguments[@]}"; do + if [ -d "$filename" ]; then + shopt -s nullglob + for suite_filename in "$(expand_path "$filename")"/*.bats; do + filenames["${#filenames[@]}"]="$suite_filename" + done + shopt -u nullglob + else + filenames["${#filenames[@]}"]="$(expand_path "$filename")" + fi +done + +if [ "${#filenames[@]}" -eq 1 ]; then + command="bats-exec-test" +else + command="bats-exec-suite" +fi + +if [ -n "$pretty" ]; then + extended_syntax_flag="-x" + formatter="bats-format-tap-stream" +else + extended_syntax_flag="" + formatter="cat" +fi + +set -o pipefail execfail +exec "$command" $count_flag $extended_syntax_flag "${filenames[@]}" | "$formatter" diff --git a/tests/bin/bats-exec-suite b/tests/bin/bats-exec-suite new file mode 100755 index 0000000000..29ab255d06 --- /dev/null +++ b/tests/bin/bats-exec-suite @@ -0,0 +1,55 @@ +#!/usr/bin/env bash +set -e + +count_only_flag="" +if [ "$1" = "-c" ]; then + count_only_flag=1 + shift +fi + +extended_syntax_flag="" +if [ "$1" = "-x" ]; then + extended_syntax_flag="-x" + shift +fi + +trap "kill 0; exit 1" int + +count=0 +for filename in "$@"; do + let count+="$(bats-exec-test -c "$filename")" +done + +if [ -n "$count_only_flag" ]; then + echo "$count" + exit +fi + +echo "1..$count" +status=0 +offset=0 +for filename in "$@"; do + index=0 + { + IFS= read -r # 1..n + while IFS= read -r line; do + case "$line" in + "begin "* ) + let index+=1 + echo "${line/ $index / $(($offset + $index)) }" + ;; + "ok "* | "not ok "* ) + [ -n "$extended_syntax_flag" ] || let index+=1 + echo "${line/ $index / $(($offset + $index)) }" + [ "${line:0:6}" != "not ok" ] || status=1 + ;; + * ) + echo "$line" + ;; + esac + done + } < <( bats-exec-test $extended_syntax_flag "$filename" ) + 
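  # For example, two test files containing two tests and one test yield a
  # combined "1..3" plan, the second file's "ok 1" being re-emitted as "ok 3"
  # by the offset arithmetic below.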
offset=$(($offset + $index)) +done + +exit "$status" diff --git a/tests/bin/bats-exec-test b/tests/bin/bats-exec-test new file mode 100755 index 0000000000..8f3bd5102e --- /dev/null +++ b/tests/bin/bats-exec-test @@ -0,0 +1,346 @@ +#!/usr/bin/env bash +set -e +set -E +set -T + +BATS_COUNT_ONLY="" +if [ "$1" = "-c" ]; then + BATS_COUNT_ONLY=1 + shift +fi + +BATS_EXTENDED_SYNTAX="" +if [ "$1" = "-x" ]; then + BATS_EXTENDED_SYNTAX="$1" + shift +fi + +BATS_TEST_FILENAME="$1" +if [ -z "$BATS_TEST_FILENAME" ]; then + echo "usage: bats-exec " >&2 + exit 1 +elif [ ! -f "$BATS_TEST_FILENAME" ]; then + echo "bats: $BATS_TEST_FILENAME does not exist" >&2 + exit 1 +else + shift +fi + +BATS_TEST_DIRNAME="$(dirname "$BATS_TEST_FILENAME")" +BATS_TEST_NAMES=() + +load() { + local name="$1" + local filename + + if [ "${name:0:1}" = "/" ]; then + filename="${name}" + else + filename="$BATS_TEST_DIRNAME/${name}.bash" + fi + + [ -f "$filename" ] || { + echo "bats: $filename does not exist" >&2 + exit 1 + } + + source "${filename}" +} + +run() { + local e E T oldIFS + [[ ! "$-" =~ e ]] || e=1 + [[ ! "$-" =~ E ]] || E=1 + [[ ! "$-" =~ T ]] || T=1 + set +e + set +E + set +T + output="$("$@" 2>&1)" + status="$?" + oldIFS=$IFS + IFS=$'\n' lines=($output) + [ -z "$e" ] || set -e + [ -z "$E" ] || set -E + [ -z "$T" ] || set -T + IFS=$oldIFS +} + +setup() { + true +} + +teardown() { + true +} + +skip() { + BATS_TEST_SKIPPED=${1:-1} + BATS_TEST_COMPLETED=1 + exit 0 +} + +bats_test_begin() { + BATS_TEST_DESCRIPTION="$1" + if [ -n "$BATS_EXTENDED_SYNTAX" ]; then + echo "begin $BATS_TEST_NUMBER $BATS_TEST_DESCRIPTION" >&3 + fi + setup +} + +bats_test_function() { + local test_name="$1" + BATS_TEST_NAMES["${#BATS_TEST_NAMES[@]}"]="$test_name" +} + +bats_capture_stack_trace() { + BATS_PREVIOUS_STACK_TRACE=( "${BATS_CURRENT_STACK_TRACE[@]}" ) + BATS_CURRENT_STACK_TRACE=() + + local test_pattern=" $BATS_TEST_NAME $BATS_TEST_SOURCE" + local setup_pattern=" setup $BATS_TEST_SOURCE" + local teardown_pattern=" teardown $BATS_TEST_SOURCE" + + local frame + local index=1 + + while frame="$(caller "$index")"; do + BATS_CURRENT_STACK_TRACE["${#BATS_CURRENT_STACK_TRACE[@]}"]="$frame" + if [[ "$frame" = *"$test_pattern" || \ + "$frame" = *"$setup_pattern" || \ + "$frame" = *"$teardown_pattern" ]]; then + break + else + let index+=1 + fi + done + + BATS_SOURCE="$(bats_frame_filename "${BATS_CURRENT_STACK_TRACE[0]}")" + BATS_LINENO="$(bats_frame_lineno "${BATS_CURRENT_STACK_TRACE[0]}")" +} + +bats_print_stack_trace() { + local frame + local index=1 + local count="${#@}" + + for frame in "$@"; do + local filename="$(bats_trim_filename "$(bats_frame_filename "$frame")")" + local lineno="$(bats_frame_lineno "$frame")" + + if [ $index -eq 1 ]; then + echo -n "# (" + else + echo -n "# " + fi + + local fn="$(bats_frame_function "$frame")" + if [ "$fn" != "$BATS_TEST_NAME" ]; then + echo -n "from function \`$fn' " + fi + + if [ $index -eq $count ]; then + echo "in test file $filename, line $lineno)" + else + echo "in file $filename, line $lineno," + fi + + let index+=1 + done +} + +bats_print_failed_command() { + local frame="$1" + local status="$2" + local filename="$(bats_frame_filename "$frame")" + local lineno="$(bats_frame_lineno "$frame")" + + local failed_line="$(bats_extract_line "$filename" "$lineno")" + local failed_command="$(bats_strip_string "$failed_line")" + echo -n "# \`${failed_command}' " + + if [ $status -eq 1 ]; then + echo "failed" + else + echo "failed with status $status" + fi +} + +bats_frame_lineno() { + local 
frame="$1" + local lineno="${frame%% *}" + echo "$lineno" +} + +bats_frame_function() { + local frame="$1" + local rest="${frame#* }" + local fn="${rest%% *}" + echo "$fn" +} + +bats_frame_filename() { + local frame="$1" + local rest="${frame#* }" + local filename="${rest#* }" + + if [ "$filename" = "$BATS_TEST_SOURCE" ]; then + echo "$BATS_TEST_FILENAME" + else + echo "$filename" + fi +} + +bats_extract_line() { + local filename="$1" + local lineno="$2" + sed -n "${lineno}p" "$filename" +} + +bats_strip_string() { + local string="$1" + printf "%s" "$string" | sed -e "s/^[ "$'\t'"]*//" -e "s/[ "$'\t'"]*$//" +} + +bats_trim_filename() { + local filename="$1" + local length="${#BATS_CWD}" + + if [ "${filename:0:length+1}" = "${BATS_CWD}/" ]; then + echo "${filename:length+1}" + else + echo "$filename" + fi +} + +bats_debug_trap() { + if [ "$BASH_SOURCE" != "$1" ]; then + bats_capture_stack_trace + fi +} + +bats_error_trap() { + BATS_ERROR_STATUS="$?" + BATS_ERROR_STACK_TRACE=( "${BATS_PREVIOUS_STACK_TRACE[@]}" ) + trap - debug +} + +bats_teardown_trap() { + trap "bats_exit_trap" exit + local status=0 + teardown >>"$BATS_OUT" 2>&1 || status="$?" + + if [ $status -eq 0 ]; then + BATS_TEARDOWN_COMPLETED=1 + elif [ -n "$BATS_TEST_COMPLETED" ]; then + BATS_ERROR_STATUS="$status" + BATS_ERROR_STACK_TRACE=( "${BATS_CURRENT_STACK_TRACE[@]}" ) + fi + + bats_exit_trap +} + +bats_exit_trap() { + local status + local skipped + trap - err exit + + skipped="" + if [ -n "$BATS_TEST_SKIPPED" ]; then + skipped=" # skip" + if [ "1" != "$BATS_TEST_SKIPPED" ]; then + skipped+=" ($BATS_TEST_SKIPPED)" + fi + fi + + if [ -z "$BATS_TEST_COMPLETED" ] || [ -z "$BATS_TEARDOWN_COMPLETED" ]; then + echo "not ok $BATS_TEST_NUMBER $BATS_TEST_DESCRIPTION" >&3 + bats_print_stack_trace "${BATS_ERROR_STACK_TRACE[@]}" >&3 + bats_print_failed_command "${BATS_ERROR_STACK_TRACE[${#BATS_ERROR_STACK_TRACE[@]}-1]}" "$BATS_ERROR_STATUS" >&3 + sed -e "s/^/# /" < "$BATS_OUT" >&3 + status=1 + else + echo "ok ${BATS_TEST_NUMBER}${skipped} ${BATS_TEST_DESCRIPTION}" >&3 + status=0 + fi + + rm -f "$BATS_OUT" + exit "$status" +} + +bats_perform_tests() { + echo "1..$#" + test_number=1 + status=0 + for test_name in "$@"; do + "$0" $BATS_EXTENDED_SYNTAX "$BATS_TEST_FILENAME" "$test_name" "$test_number" || status=1 + let test_number+=1 + done + exit "$status" +} + +bats_perform_test() { + BATS_TEST_NAME="$1" + if [ "$(type -t "$BATS_TEST_NAME" || true)" = "function" ]; then + BATS_TEST_NUMBER="$2" + if [ -z "$BATS_TEST_NUMBER" ]; then + echo "1..1" + BATS_TEST_NUMBER="1" + fi + + BATS_TEST_COMPLETED="" + BATS_TEARDOWN_COMPLETED="" + trap "bats_debug_trap \"\$BASH_SOURCE\"" debug + trap "bats_error_trap" err + trap "bats_teardown_trap" exit + "$BATS_TEST_NAME" >>"$BATS_OUT" 2>&1 + BATS_TEST_COMPLETED=1 + + else + echo "bats: unknown test name \`$BATS_TEST_NAME'" >&2 + exit 1 + fi +} + +if [ -z "$TMPDIR" ]; then + BATS_TMPDIR="/tmp" +else + BATS_TMPDIR="${TMPDIR%/}" +fi + +BATS_TMPNAME="$BATS_TMPDIR/bats.$$" +BATS_PARENT_TMPNAME="$BATS_TMPDIR/bats.$PPID" +BATS_OUT="${BATS_TMPNAME}.out" + +bats_preprocess_source() { + BATS_TEST_SOURCE="${BATS_TMPNAME}.src" + { tr -d '\r' < "$BATS_TEST_FILENAME"; echo; } | bats-preprocess > "$BATS_TEST_SOURCE" + trap "bats_cleanup_preprocessed_source" err exit + trap "bats_cleanup_preprocessed_source; exit 1" int +} + +bats_cleanup_preprocessed_source() { + rm -f "$BATS_TEST_SOURCE" +} + +bats_evaluate_preprocessed_source() { + if [ -z "$BATS_TEST_SOURCE" ]; then + BATS_TEST_SOURCE="${BATS_PARENT_TMPNAME}.src" + fi 
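  # When bats-exec-suite re-invokes this script once per test, the source
  # preprocessed by the parent process is picked up here via the $PPID-keyed
  # BATS_PARENT_TMPNAME above, so each test process skips re-preprocessing.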
+ source "$BATS_TEST_SOURCE" +} + +exec 3<&1 + +if [ "$#" -eq 0 ]; then + bats_preprocess_source + bats_evaluate_preprocessed_source + + if [ -n "$BATS_COUNT_ONLY" ]; then + echo "${#BATS_TEST_NAMES[@]}" + else + bats_perform_tests "${BATS_TEST_NAMES[@]}" + fi +else + bats_evaluate_preprocessed_source + bats_perform_test "$@" +fi diff --git a/tests/bin/bats-format-tap-stream b/tests/bin/bats-format-tap-stream new file mode 100755 index 0000000000..614768f4d9 --- /dev/null +++ b/tests/bin/bats-format-tap-stream @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +set -e + +# Just stream the TAP output (sans extended syntax) if tput is missing +command -v tput >/dev/null || exec grep -v "^begin " + +header_pattern='[0-9]+\.\.[0-9]+' +IFS= read -r header + +if [[ "$header" =~ $header_pattern ]]; then + count="${header:3}" + index=0 + failures=0 + skipped=0 + name="" + count_column_width=$(( ${#count} * 2 + 2 )) +else + # If the first line isn't a TAP plan, print it and pass the rest through + printf "%s\n" "$header" + exec cat +fi + +update_screen_width() { + screen_width="$(tput cols)" + count_column_left=$(( $screen_width - $count_column_width )) +} + +trap update_screen_width WINCH +update_screen_width + +begin() { + go_to_column 0 + printf_with_truncation $(( $count_column_left - 1 )) " %s" "$name" + clear_to_end_of_line + go_to_column $count_column_left + printf "%${#count}s/${count}" "$index" + go_to_column 1 +} + +pass() { + go_to_column 0 + printf " ✓ %s" "$name" + advance +} + +skip() { + local reason="$1" + [ -z "$reason" ] || reason=": $reason" + go_to_column 0 + printf " - %s (skipped%s)" "$name" "$reason" + advance +} + +fail() { + go_to_column 0 + set_color 1 bold + printf " ✗ %s" "$name" + advance +} + +log() { + set_color 1 + printf " %s\n" "$1" + clear_color +} + +summary() { + printf "\n%d test%s" "$count" "$(plural "$count")" + + printf ", %d failure%s" "$failures" "$(plural "$failures")" + + if [ "$skipped" -gt 0 ]; then + printf ", %d skipped" "$skipped" + fi + + printf "\n" +} + +printf_with_truncation() { + local width="$1" + shift + local string="$(printf "$@")" + + if [ "${#string}" -gt "$width" ]; then + printf "%s..." "${string:0:$(( $width - 4 ))}" + else + printf "%s" "$string" + fi +} + +go_to_column() { + local column="$1" + printf "\x1B[%dG" $(( $column + 1 )) +} + +clear_to_end_of_line() { + printf "\x1B[K" +} + +advance() { + clear_to_end_of_line + echo + clear_color +} + +set_color() { + local color="$1" + local weight="$2" + printf "\x1B[%d;%dm" $(( 30 + $color )) "$( [ "$weight" = "bold" ] && echo 1 || echo 22 )" +} + +clear_color() { + printf "\x1B[0m" +} + +plural() { + [ "$1" -eq 1 ] || echo "s" +} + +_buffer="" + +buffer() { + _buffer="${_buffer}$("$@")" +} + +flush() { + printf "%s" "$_buffer" + _buffer="" +} + +finish() { + flush + printf "\n" +} + +trap finish EXIT + +while IFS= read -r line; do + case "$line" in + "begin "* ) + let index+=1 + name="${line#* $index }" + buffer begin + flush + ;; + "ok "* ) + skip_expr="ok $index # skip (\(([^)]*)\))?" 
+      if [[ "$line" =~ $skip_expr ]]; then
+        let skipped+=1
+        buffer skip "${BASH_REMATCH[2]}"
+      else
+        buffer pass
+      fi
+      ;;
+    "not ok "* )
+      let failures+=1
+      buffer fail
+      ;;
+    "# "* )
+      buffer log "${line:2}"
+      ;;
+  esac
+done
+
+buffer summary
diff --git a/tests/bin/bats-preprocess b/tests/bin/bats-preprocess
new file mode 100755
index 0000000000..04297ed019
--- /dev/null
+++ b/tests/bin/bats-preprocess
@@ -0,0 +1,52 @@
+#!/usr/bin/env bash
+set -e
+
+encode_name() {
+  local name="$1"
+  local result="test_"
+
+  if [[ ! "$name" =~ [^[:alnum:]\ _-] ]]; then
+    name="${name//_/-5f}"
+    name="${name//-/-2d}"
+    name="${name// /_}"
+    result+="$name"
+  else
+    local length="${#name}"
+    local char i
+
+    for ((i=0; i<length; i++)); do
[remainder of bats-preprocess lost in extraction]
diff --git a/tests/improver-nbhood/.00-null.bats.swp b/tests/improver-nbhood/.00-null.bats.swp
new file mode 100644
[binary Vim swap file patch data (Bin 0 -> 12288 bytes) omitted]
diff --git a/tests/improver-nbhood/00-null.bats b/tests/improver-nbhood/00-null.bats
new file mode 100644
index 0000000000..9d414a1fbb
--- /dev/null
+++ b/tests/improver-nbhood/00-null.bats
@@ -0,0 +1,9 @@
+#!/usr/bin/env bats
+
+@test "nbhood no arguments" {
+  run improver nbhood
+  [[ "$status" -eq 2 ]]
+  expected="usage: improver-nbhood [-h] [--radius-in-km RADIUS]\
+ INPUT_FILE OUTPUT_FILE"
+  [[ "$output" =~ "$expected" ]]
+}

From 633985b0017138655817a62bdb2147a8a01333c0 Mon Sep 17 00:00:00 2001
From: Ben Fitzpatrick
Date: Thu, 20 Apr 2017 15:07:41 +0100
Subject: [PATCH 0008/1367] Fix nbhood import iris
---
 lib/improver/nbhood.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py
index 4e66c81713..5475fac41b 100644
--- a/lib/improver/nbhood.py
+++ b/lib/improver/nbhood.py
@@ -31,7 +31,7 @@
 
 """Module containing neighbourhood processing utilities."""
 
-#import iris
+import iris
 import numpy as np
 import scipy.ndimage.filters

From 9191ae1795c18d6641d8348a798486817de19a9b Mon Sep 17 00:00:00 2001
From: Ben Fitzpatrick
Date: Tue, 16 May 2017 09:25:33 +0100
Subject: [PATCH 0009/1367] All-inclusive tests command
---
 .travis.yml         |  5 +----
 bin/improver        | 10 +++++-----
 bin/improver-nbhood |  2 +-
 bin/improver-tests  |  6 ++++++
 4 files changed, 13 insertions(+), 10 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 9e2130a099..09bff45418 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -65,7 +65,4 @@ install:
 
 script:
     - python -c "import iris"
-    - cd lib
-    - pep8 improver
-    - pylint --extension-pkg-whitelist=numpy -E --rcfile=../etc/pylintrc improver
-    - python -m unittest discover
+    - bin/improver tests
diff --git a/bin/improver b/bin/improver
index 4c03a584de..81ae270ea7 100755
--- a/bin/improver
+++ b/bin/improver
@@ -13,7 +13,8 @@
 #
 # DESCRIPTION
 #     Launch particular operations for post-processing or verification of
-#     meteorological data.
+#     meteorological data. This script is a central launcher for all
+#     IMPROVER subcommands (improver-xxxx) and central help.
#------------------------------------------------------------------------------ set -eu @@ -23,7 +24,7 @@ export IMPROVER_DIR="$(cd $(dirname $0)/../ && pwd -P)" # List all improver subcommands or operations. get_operations() { cd "$IMPROVER_DIR/bin/" - ls improver-* | sort | sed "s/^improver-//" + ls improver-*[^~] | sort | sed "s/^improver-//" } # Print generic or operation specific help. @@ -37,8 +38,7 @@ print_help() { sed -n '/^# NAME/,/^#---/{/^#\-/d; s/^#//; s/^ //; p}' "$0" echo echo "OPERATIONS" - echo -n " improver " - get_operations | sed "s/ /\n improver /g" + get_operations | sed "s/^/ improver /" fi } @@ -71,7 +71,7 @@ if [[ -f "$IMPROVER_DIR/etc/site-init" ]]; then fi # Put our library and scripts in the paths. -export PYTHONPATH="$IMPROVER_DIR/lib/:$PYTHONPATH" +export PYTHONPATH="$IMPROVER_DIR/lib/:${PYTHONPATH:-}" export PATH="$IMPROVER_DIR/bin/:$PATH" exec improver-$OPER "$@" diff --git a/bin/improver-nbhood b/bin/improver-nbhood index 878ac64034..8ac68f2266 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -33,7 +33,7 @@ import argparse -#import iris +import iris from improver.nbhood import BasicNeighbourhoodProcessing diff --git a/bin/improver-tests b/bin/improver-tests index 75408edbc5..d235669a2f 100755 --- a/bin/improver-tests +++ b/bin/improver-tests @@ -14,6 +14,12 @@ set -eu +cd $IMPROVER_DIR/lib +pep8 improver +pylint -E --extension-pkg-whitelist=numpy --rcfile=../etc/pylintrc improver +python -m unittest discover + + PATH="$IMPROVER_DIR/tests/bin/:$PATH" # Put our library and scripts in the paths. if [[ ${1:-} != '--debug' ]] && type prove &>/dev/null; then From 1fdd7cd55774efa581e2ccc2fa47cd9547154aaf Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Tue, 16 May 2017 11:38:16 +0100 Subject: [PATCH 0010/1367] Unified test command and update pylintrc for site --- bin/improver-tests | 7 +++++-- etc/pylintrc | 6 +++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/bin/improver-tests b/bin/improver-tests index d235669a2f..61d8f9f2d2 100755 --- a/bin/improver-tests +++ b/bin/improver-tests @@ -15,8 +15,8 @@ set -eu cd $IMPROVER_DIR/lib -pep8 improver -pylint -E --extension-pkg-whitelist=numpy --rcfile=../etc/pylintrc improver +${PEP8:-pep8} improver +${PYLINT:-pylint} -E --rcfile=../etc/pylintrc improver python -m unittest discover @@ -28,3 +28,6 @@ if [[ ${1:-} != '--debug' ]] && type prove &>/dev/null; then else bats $(find "$IMPROVER_DIR/tests/" -name "*.bats") fi + +echo +echo "All tests passed OK." diff --git a/etc/pylintrc b/etc/pylintrc index 612c6c6fc8..efa1363828 100644 --- a/etc/pylintrc +++ b/etc/pylintrc @@ -1,2 +1,6 @@ +[MASTER] +extension-pkg-whitelist=numpy,scipy + [TYPECHECK] -ignored-classes=numpy,tuple,WeightedAggregator +ignored-classes=tuple,WeightedAggregator,MaskedArray +ignored-modules=scipy.stats From af232e36145a132dc4078c8dc20d915f9e7d5ddb Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Tue, 16 May 2017 12:04:34 +0100 Subject: [PATCH 0011/1367] Better formatting and Travis pylint fix --- bin/improver-tests | 21 +++++++++++++++++---- etc/pylintrc | 2 +- 2 files changed, 18 insertions(+), 5 deletions(-) diff --git a/bin/improver-tests b/bin/improver-tests index 61d8f9f2d2..046bb1a8e0 100755 --- a/bin/improver-tests +++ b/bin/improver-tests @@ -14,20 +14,33 @@ set -eu +function echo_ok { + echo -e "\033[1;32m[OK]\033[0m $1" +} + cd $IMPROVER_DIR/lib + +# PEP8 testing. ${PEP8:-pep8} improver +echo_ok "pep8" + +# Pylint obvious-errors-only testing. 
${PYLINT:-pylint} -E --rcfile=../etc/pylintrc improver -python -m unittest discover +echo_ok "pylint -E" +# Unit tests. +python -m unittest discover +echo_ok "Unit tests" +# CLI testing. PATH="$IMPROVER_DIR/tests/bin/:$PATH" -# Put our library and scripts in the paths. if [[ ${1:-} != '--debug' ]] && type prove &>/dev/null; then prove -j $(nproc) -r -e "bats --tap" \ --ext ".bats" "$IMPROVER_DIR/tests/" else bats $(find "$IMPROVER_DIR/tests/" -name "*.bats") fi +echo_ok "CLI tests" -echo -echo "All tests passed OK." +# No errors found (or script would have exited). +echo_ok "All tests passed." diff --git a/etc/pylintrc b/etc/pylintrc index efa1363828..bc38a1d377 100644 --- a/etc/pylintrc +++ b/etc/pylintrc @@ -3,4 +3,4 @@ extension-pkg-whitelist=numpy,scipy [TYPECHECK] ignored-classes=tuple,WeightedAggregator,MaskedArray -ignored-modules=scipy.stats +ignored-modules=numpy,scipy,scipy.stats From c17915cb1225796a633b5e67daa41c975505c858 Mon Sep 17 00:00:00 2001 From: "caroline.jones" Date: Tue, 16 May 2017 15:40:38 +0100 Subject: [PATCH 0012/1367] Adding a first draft of a weighted blending plugin --- ...est_weighted_blend_BasicWeightedAverage.py | 76 +++++++++++++++ lib/improver/weighted_blend.py | 95 +++++++++++++++++++ 2 files changed, 171 insertions(+) create mode 100644 lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py create mode 100644 lib/improver/weighted_blend.py diff --git a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py new file mode 100644 index 0000000000..9aef1d7cf3 --- /dev/null +++ b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
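# A plain-numpy sketch of the blend exercised by the tests and plugin below:
# iris.analysis.MEAN with no weights reduces to an equal-weights average, so
# two time slices of zeros and ones blend to a field of 0.5.
#
#     import numpy as np
#     data = np.stack([np.zeros((5, 5)), np.ones((5, 5))])
#     print(np.average(data, axis=0))   # a (5, 5) array of 0.5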
+"""Unit tests for the weighted_blend.BasicWeightedAverage plugin."""
+
+
+import unittest
+
+from cf_units import Unit
+from iris.coords import AuxCoord, DimCoord
+from iris.cube import Cube
+from iris.tests import IrisTest
+import numpy as np
+
+from improver.weighted_blend import BasicWeightedAverage
+
+
+class TestBasicWeightedAverage(IrisTest):
+
+    """Test the Basic Weighted Average plugin."""
+
+    def setUp(self):
+        """Create a cube with two time points: a field of zeros and ones."""
+        data = np.zeros((2, 5, 5))
+        data[0, :, :] = 0.0
+        data[1, :, :] = 1.0
+        cube = Cube(data, standard_name="precipitation_amount",
+                    units="kg m^-2 s^-1")
+        cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, 5), 'latitude',
+                                    units='degrees'), 1)
+        cube.add_dim_coord(DimCoord(np.linspace(120, 180, 5), 'longitude',
+                                    units='degrees'), 2)
+        time_origin = "hours since 1970-01-01 00:00:00"
+        calendar = "gregorian"
+        tunit = Unit(time_origin, calendar)
+        cube.add_aux_coord(AuxCoord([402192.5, 402193.5],
+                                    "time", units=tunit), 0)
+        self.cube = cube
+
+    def test_basic(self):
+        """Test that the plugin returns an iris.cube.Cube."""
+        coord = "time"
+        plugin = BasicWeightedAverage(coord)
+        result = plugin.process(self.cube)
+        self.assertIsInstance(result, Cube)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/lib/improver/weighted_blend.py b/lib/improver/weighted_blend.py
new file mode 100644
index 0000000000..5c5fe23db8
--- /dev/null
+++ b/lib/improver/weighted_blend.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Module containing Weighted Blend classes."""
+
+
+import iris
+import numpy as np
+
+
+class BasicWeightedAverage(object):
+    """Apply a Basic Weighted Average to a cube.
+
+    """
+
+    def __init__(self, coord, coord_adjust=None):
+        """Set up a weighted-average blend over the given coordinate.
+
+        Parameters
+        ----------
+
+        coord : string
+            The name of a coordinate dimension in the cube
+
+        coord_adjust : callable or None
+            Optional function applied to the points of the collapsed
+            coordinate to set the output coordinate values
+
+        """
+        self.coord = coord
+        self.coord_adjust = coord_adjust
+
+    def __str__(self):
+        """Represent the configured plugin instance as a string."""
+        return (
+            '<BasicWeightedAverage: coord = {0:s}>').format(self.coord)
+
+    def process(self, cube, weights=None):
+        """Calculate a weighted average over the chosen coordinate.
+
+        Parameters
+        ----------
+
+        cube : iris.cube.Cube
+            Cube to blend across the coord.
+
+        weights : array of weights or None
+            If None, an unweighted (equal-weights) mean is taken.
+
+        """
+        if not isinstance(cube, iris.cube.Cube):
+            raise ValueError('the first argument must be an instance of ' +
+                             'iris.cube.Cube')
+        if not cube.coords(self.coord):
+            raise ValueError('the coord used for blending must be ' +
+                             'an existing coordinate in the input cube')
+        collapse_dim = cube.coord_dims(self.coord)
+        if not collapse_dim:
+            cube = iris.util.new_axis(cube, self.coord)
+            collapse_dim = cube.coord_dims(self.coord)
+        if weights is not None:
+            weights = iris.util.broadcast_to_shape(np.array(weights),
+                                                   cube.shape, collapse_dim)
+        result = cube.collapsed(self.coord, iris.analysis.MEAN,
+                                weights=weights)
+        if self.coord_adjust is not None:
+            # adjust values of collapsed coordinates
+            for crd in result.coords():
+                if cube.coord_dims(crd.name()) == collapse_dim:
+                    pnts = cube.coord(crd.name()).points
+                    crd.points = np.array(self.coord_adjust(pnts),
+                                          dtype=crd.points.dtype)
+        return result

From 836551b5f7b63bafae1693ba0896e182badb3676 Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Thu, 18 May 2017 10:35:21 +0100
Subject: [PATCH 0013/1367] Fixed unit tests by editing use of np.newaxis.
---
 lib/improver/wind_downscaling.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py
index e4802e584b..61b69e7393 100644
--- a/lib/improver/wind_downscaling.py
+++ b/lib/improver/wind_downscaling.py
@@ -369,9 +369,9 @@ def calc_roughness_correction(self, hgrid, uold, mask):
         unew = np.copy(uold)
         mhref = self.h_ref
         mhref[~mask] = RMDI
-        cond = hgrid < self.h_ref[:, np.newaxis]
+        cond = hgrid < self.h_ref[:, :, np.newaxis]
         unew[cond] = (
-            ustar[:, np.newaxis]*np.ones(unew.shape)
+            ustar[:, :, np.newaxis]*np.ones(unew.shape)
             )[cond] * (
                 np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) *
                               np.ones(unew.shape)))[cond])/VONKARMAN
@@ -406,9 +406,9 @@ def _calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False):
 
         # Ignores the height at the position where u_in is RMDI,"hops over"
         hhere = np.ma.masked_less(hhere, 0.0)
-        upidx = np.argmax(h_in > hhere[:, np.newaxis], axis=2)
+        upidx = np.argmax(h_in > hhere[:, :, np.newaxis], axis=2)
         # loidx = np.maximum(upidx-1, 0) #if RMDI, need below
-        loidx = np.argmin(np.ma.masked_less(hhere[:, np.newaxis] -
+        loidx = np.argmin(np.ma.masked_less(hhere[:, :, np.newaxis] -
                                             h_in, 0.0), axis=2)
 
         if h_in.ndim == 3:
@@ -550,10 +550,10 @@ def _calc_height_corr(self, u_a, heightg, mask, onemfrac):
             zdim = heightg.shape[2]
         ml2 = self.h_at0*self.wavenum
         expon = np.ones([xdim, ydim, zdim])
-        mult = self.wavenum[:, np.newaxis]*heightg
+        mult = self.wavenum[:, :, np.newaxis]*heightg
         expon[mult > 0.0001] = np.exp(-mult[mult > 0.0001])
         hc_add = (
-            expon*u_a[:, np.newaxis] * ml2[:, np.newaxis] * onemfrac)
+            expon*u_a[:, :, np.newaxis] * ml2[:, :, np.newaxis] * onemfrac)
         hc_add[~mask, :] = 0
         return hc_add
 
From 8ee358654a5950d11fb220f1296c0f0f87c9f14c Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Fri, 19 May 2017 08:12:51 +0100
Subject: [PATCH 0014/1367] Edits to wind
downscaling to try to make improvements, such as making exceptions more
 specific, and simplifying a few operations.
---
 ...st_wind_downscaling_roughnesscorrection.py |   4 +-
 lib/improver/wind_downscaling.py              | 162 +++++++++---------
 2 files changed, 79 insertions(+), 87 deletions(-)

diff --git a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py
index 31f9f58be8..71732428ef 100644
--- a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py
+++ b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py
@@ -70,7 +70,7 @@ def set_up_cube(num_time_points=1, num_grid_points=1, num_height_levels=7,
                 cube.add_aux_coord(
                     AuxCoord(height[i_idx], "height", units=Unit("meter")))
             cubel1.append(cube)
-        cubel.append(cubel1.merge()[0])
+        cubel.append(cubel1.merge_cube())
         cubel = cubel.merge(0)
         cube = cubel[0]
     if data is not None:
@@ -125,7 +125,7 @@ def __init__(self, nx_ny=3, AoS=None, Sigma=None, z_0=0.2, pporog=None,
 
         Parameters
         ----------
-        nxny: a scalar or an np.array([x,y])
+        nx_ny: a scalar or an np.array([x,y])
            Sets dimension for tests.
         AoS: float or 1D or 2D array
            Silhouette roughness field
diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py
index 61b69e7393..0252356304 100644
--- a/lib/improver/wind_downscaling.py
+++ b/lib/improver/wind_downscaling.py
@@ -36,6 +36,7 @@
 
 from cf_units import Unit
 import iris
+from iris.exceptions import CoordinateNotFoundError
 import numpy as np
 
 from improver.constants import RMDI
@@ -589,6 +590,9 @@ def _do_rc_hc_all(self, hgrid, uorig):
             sum of unew: 3D array (float) RC corrected windspeed
                 on levels HC: 3D array (float) HC additional part
 
+        Friedrich, M. M., 2016
+        Wind Downscaling Program (Internal Met Office Report)
+
         """
         if hgrid.ndim == 3:
             condition1 = ((hgrid == RMDI).any(axis=2))
@@ -604,8 +608,12 @@ def _do_rc_hc_all(self, hgrid, uorig):
             unew = uorig
         uhref_orig = self._calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc)
         mask_hc[uhref_orig <= 0] = False
-        onemfrac = 1.0
+        # Setting this value to 1 is equivalent to setting the
+        # Bessel function to 1 (Friedrich, 2016).
+        # Example usage if the Bessel function was not set to 1 is:
         # onemfrac = 1.0 - BfuncFrac(nx,ny,nz,heightvec,z_0,waveno, Ustar, UI)
+        onemfrac = 1.0
+
         hc_add = self._calc_height_corr(uhref_orig, hgrid, mask_hc, onemfrac)
         result = unew + hc_add
         result[result < 0.] = 0  # HC can be negative if pporo < modoro

Date: Fri, 19 May 2017 08:34:31 +0100
Subject: [PATCH 0015/1367] Pep8 and pylint improvements.
---
 lib/improver/wind_downscaling.py | 36 +++++++++++++------------------
 1 file changed, 14 insertions(+), 22 deletions(-)

diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py
index 0252356304..725ec7145a 100644
--- a/lib/improver/wind_downscaling.py
+++ b/lib/improver/wind_downscaling.py
@@ -574,7 +574,7 @@ def _delta_height(self):
         delt_z[self.hcmask] = self.pporo[self.hcmask]-self.modoro[self.hcmask]
         return delt_z
 
-    def _do_rc_hc_all(self, hgrid, uorig):
+    def do_rc_hc_all(self, hgrid, uorig):
         """Function to call HC and RC (height and roughness corrections).
 
         Parameters:
@@ -664,9 +664,6 @@ def __init__(self, a_over_s_cube, sigma_cube, pporo_cube,
                 self.z_0 = next(z0_cube.slices([y_name, x_name]))
             except AttributeError:
                 self.z_0 = z0_cube
-            except Exception as exc:
-                emsg = "'{0}' while z0 setting. Arguments '{1}'."
- raise ValueError(emsg.format(exc.message, exc.args)) self.pp_oro = next(pporo_cube.slices([y_name, x_name])) self.model_oro = next(modoro_cube.slices([y_name, x_name])) self.ppres = self.calc_av_ppgrid_res(pporo_cube) @@ -701,28 +698,24 @@ def find_coord_names(self, cube): range(len(cube.coords()))]) try: xname = cube.coord(axis="x").name() - except Exception as exc: + except CoordinateNotFoundError as exc: print("'{0}' while xname setting. Args: {1}.".format(exc.message, exc.args)) try: yname = cube.coord(axis="y").name() - except Exception as exc: + except CoordinateNotFoundError as exc: print("'{0}' while yname setting. Args: {1}.".format(exc.message, exc.args)) - try: + if clist.intersection(self.zcoordnames): zname = list(clist.intersection(self.zcoordnames))[0] - except IndexError: + else: zname = None - except Exception as exc: - print("'{0}' while zname setting. Args: {1}.".format(exc.message, - exc.args)) - try: + + if clist.intersection(self.tcoordnames): tname = list(clist.intersection(self.tcoordnames))[0] - except IndexError: + else: tname = None - except Exception as exc: - print("'{0}' while tname setting. Args: {1}.".format(exc.message, - exc.args)) + return xname, yname, zname, tname def calc_av_ppgrid_res(self, a_cube): @@ -792,8 +785,8 @@ def check_ancils(a_over_s_cube, sigma_cube, z0_cube, pp_oro_cube, unwanted_coord_list = [ "time", "height", "model_level_number", "forecast_time", "forecast_reference_time", "forecast_period"] - for field, exp_unit in zip(ancil_list, [None, Unit("m"), Unit("m"), - Unit("m")]): + for field, exp_unit in zip(ancil_list, [None, Unit("m"), + Unit("m"), Unit("m")]): for unwanted_coord in unwanted_coord_list: try: field.remove_coord(unwanted_coord) @@ -861,8 +854,7 @@ def find_coord_order(self, mcube): coord_dimension = np.nan positions.append(coord_dimension) - xpos, ypos, zpos, tpos = positions - return xpos, ypos, zpos, tpos + return positions def find_heightgrid(self, wind): """Setup the height grid. @@ -968,8 +960,8 @@ def process(self, input_cube): msg = ('{} has invalid wind data') raise ValueError(msg.format(time_slice.coord(self.t_name))) rc_hc = copy.deepcopy(time_slice) - rc_hc.data = roughness_correction._do_rc_hc_all(hld, - time_slice.data) + rc_hc.data = roughness_correction.do_rc_hc_all( + hld, time_slice.data) rchc_list.append(rc_hc) output_cube = rchc_list.merge_cube() # reorder input_cube and output_cube as original From f95d93905e4baa21e7060343259d9f6cee2feb72 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Fri, 19 May 2017 08:48:24 +0100 Subject: [PATCH 0016/1367] Pylint edits. 
--- lib/improver/wind_downscaling.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index 725ec7145a..87e51d2df8 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -845,15 +845,12 @@ def find_coord_order(self, mcube): """ coord_names = [self.x_name, self.y_name, self.z_name, self.t_name] - positions = [] - for coord_name in coord_names: + positions = [np.nan, np.nan, np.nan, np.nan] + for coord_index, coord_name in enumerate(coord_names): if mcube.coords(coord_name, dim_coords=True): coord_dimension = mcube.coord_dims(coord_name) coord_dimension = coord_dimension[0] - else: - coord_dimension = np.nan - positions.append(coord_dimension) - + positions[coord_index] = coord_dimension return positions def find_heightgrid(self, wind): From 1d5e6abbe2e2da5224c0011a987dc088b37696a9 Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Fri, 19 May 2017 09:20:07 +0100 Subject: [PATCH 0017/1367] Updated --- ACKNOWLEDGEMENTS.md | 2 +- bin/improver-nbhood | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/ACKNOWLEDGEMENTS.md b/ACKNOWLEDGEMENTS.md index e19d093a0b..891226b86d 100644 --- a/ACKNOWLEDGEMENTS.md +++ b/ACKNOWLEDGEMENTS.md @@ -5,4 +5,4 @@ Iris (https://github.com/SciTools/iris), LGPL: - .gitignore used as basis for ours BATS (https://github.com/sstephenson/bats), MIT-style: - - tests/bin/bats\*, unaltered + - tests/bin/bats\*, unaltered, from commit 0360811 diff --git a/bin/improver-nbhood b/bin/improver-nbhood index 8ac68f2266..691b4c7aa4 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -41,7 +41,9 @@ from improver.nbhood import BasicNeighbourhoodProcessing def main(): """Load in arguments and get going.""" parser = argparse.ArgumentParser( - description='Do some basic neighbourhood processing') + description='Apply basic weighted circle smoothing via ' + + 'the BasicNeighbourhoodProcessing plugin ' + + 'to a file with one cube.') parser.add_argument('--radius-in-km', metavar='RADIUS', type=float, help='The kernel radius for neighbourhood processing') parser.add_argument('input_filepath', metavar='INPUT_FILE', @@ -51,7 +53,6 @@ def main(): args = parser.parse_args() cube = iris.load_cube(args.input_filepath) result = BasicNeighbourhoodProcessing(args.radius_in_km).process(cube) - print 'Writing output:', args.output_filepath iris.save(result, args.output_filepath, unlimited_dimensions=[]) From f45498888ca36f5d82f35d6de49aa5abce74e668 Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Tue, 18 Apr 2017 14:27:51 +0100 Subject: [PATCH 0018/1367] Add wind downscaling algorithm --- lib/improver/constants.py | 34 + ...st_wind_downscaling_roughnesscorrection.py | 749 ++++++++++++++ lib/improver/wind_downscaling.py | 964 ++++++++++++++++++ 3 files changed, 1747 insertions(+) create mode 100644 lib/improver/constants.py create mode 100644 lib/improver/tests/test_wind_downscaling_roughnesscorrection.py create mode 100644 lib/improver/wind_downscaling.py diff --git a/lib/improver/constants.py b/lib/improver/constants.py new file mode 100644 index 0000000000..b02eb31917 --- /dev/null +++ b/lib/improver/constants.py @@ -0,0 +1,34 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Module to contain generally useful constants.""" + +# Real Missing Data Indicator +RMDI = -32767.0 diff --git a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py new file mode 100644 index 0000000000..ac702b8a22 --- /dev/null +++ b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py @@ -0,0 +1,749 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Unit tests for plugin wind_downscaling.RoughnessCorrection.""" + +import unittest + + +from cf_units import Unit +import iris +from iris.coords import AuxCoord +from iris.tests import IrisTest +import numpy as np + +from improver.grids.osgb import OSGBGRID +from improver.constants import RMDI +from improver.wind_downscaling import RoughnessCorrection + + +def set_up_cube(num_time_points=1, num_grid_points=1, num_height_levels=7, + data=None, name=None, unit=None, height=None): + """Set up a normal OSGB UK National Grid cube.""" + cubel = iris.cube.CubeList() + tunit = Unit("hours since 1970-01-01 00:00:00", "gregorian") + t_0 = 402192.5 + if isinstance(num_grid_points, int): + num_grid_points_x = num_grid_points_y = num_grid_points + else: + num_grid_points_x = num_grid_points[0] + num_grid_points_y = num_grid_points[1] + for i_idx in range(num_height_levels): + cubel1 = iris.cube.CubeList() + for j_idx in range(num_time_points): + cube = OSGBGRID + cube = cube[:num_grid_points_x, :num_grid_points_y] + cube.add_aux_coord(AuxCoord(t_0 + j_idx, "time", units=tunit)) + if height is None: + cube.add_aux_coord(AuxCoord(i_idx, "model_level_number")) + elif isinstance(height, float) or isinstance(height, int): + cube.add_aux_coord( + AuxCoord(height, "height", units=Unit("meter"))) + else: + cube.add_aux_coord( + AuxCoord(height[i_idx], "height", units=Unit("meter"))) + cubel1.append(cube) + cubel.append(cubel1.merge()[0]) + cubel = cubel.merge(0) + cube = cubel[0] + if data is not None: + try: + data = np.array(data) + cube.data = data.reshape(cube.data.shape) + except ValueError as ex: + if ex.message == "total size of new array must be unchanged": + msg = ("supplied data does not fit the cube." + "cube dimensions: {} vs. supplied data {}") + raise ValueError(msg.format(cube.shape, data.shape)) + else: + raise ValueError(ex) + + if name is not None: + try: + cube.standard_name = name + except ValueError as ex: + msg = "error trying to set the supplied name as cube data name: " + raise ValueError(msg + ex.message) + except TypeError as ex: + msg = ("error trying to set the supplied name as cube data name: " + "the name should be string and have a valid variable name ") + raise ValueError(msg + ex.message) + if unit is not None: + try: + cube.units = Unit(unit) + except ValueError as ex: + msg = "error trying to set Units to cube. supplied unit: {}" + raise ValueError(msg.format(unit)) + return cube + + +class TestMultiPoint(object): + + """Test (typically) 3 x 1 or 3x3 point tests. + + The size can be set by nxny, which is either a scalar or an + np.array([x,y]). It constructs cubes for the ancillary fields + Silhouette roughness (AoS), standard deviation of model height grid + cell (Sigma), vegetative roughness (z_0), post-processing grid + orography (pporog) and model orography (modelorog). If no values + are supplied, the grids that are set up have equal values at all + x-y points: AoS = 0.2, Sigma = 20, z_0 = 0.2, pporog = 250, + modelorog = 230. 
+ + """ + + def __init__(self, nx_ny=3, AoS=None, Sigma=None, z_0=0.2, pporog=None, + modelorog=None): + """Set up multi-point tests. + + Parameters + ---------- + nxny: a scalar or an np.array([x,y]) + Sets dimension for tests. + AoS: float or 1D or 2D array + Silhouette roughness field + Sigma: float or 1D or 2D array + Standard deviation field of height in grid cell + z_0: float or 1D or 2D array + Vegetative roughness field + pporog: float or 1D or 2D array + Unsmoothed orography field on post-processing grid + modelorog: float or 1D or 2D array + Model orography field on post-processing grid + + """ + if isinstance(nx_ny, int): + n_x = n_y = nx_ny + else: + n_x = nx_ny[0] + n_y = nx_ny[1] + self.n_x = n_x + self.n_y = n_y + if AoS is None: + AoS = np.ones([n_x, n_y])*0.2 + if Sigma is None: + Sigma = np.ones([n_x, n_y])*20.0 + if pporog is None: + pporog = np.ones([n_x, n_y])*250.0 + if modelorog is None: + modelorog = np.ones([n_x, n_y])*230.0 + self.w_cube = None + self.aos_cube = set_up_cube(1, [n_x, n_y], 1, data=AoS, height=0, + name=None, unit=None) + self.s_cube = set_up_cube(1, [n_x, n_y], 1, data=Sigma, height=0, + name=None, unit="m") + if z_0 is None: + self.z0_cube = None + elif isinstance(z_0, float): + z_0 = np.ones([n_x, n_y])*z_0 + self.z0_cube = set_up_cube(1, [n_x, n_y], 1, data=z_0, height=0, + name=None, unit="m") + elif isinstance(z_0, list): + z_0 = np.array(z_0) + self.z0_cube = set_up_cube(1, [n_x, n_y], 1, data=z_0, height=0, + name=None, unit="m") + self.poro_cube = set_up_cube(1, [n_x, n_y], 1, data=pporog, height=0, + name=None, unit="m") + self.moro_cube = set_up_cube( + 1, [n_x, n_y], 1, data=modelorog, height=0, name=None, unit="m") + + def test_hc_rc(self, wind, dtime=1, height=None, aslist=False): + """Function to set up a wind cube from the supplied np.array. + + Set up the wind and call the RoughnessCorrection class. If the + supplied array is 1D, it is assumed to be the height profile + and the values are copied to all x-y points and all time steps. + If the supplied array is 2D, it is assumed that the supplied + array is a function of height x time. The point is copied to + all x-y points. The first dimension should be the height + dimension. If a 3D array is supplied, the order should be + height x time x x-y-grid. If a height is supplied, it needs to + agree with the first (zeroth) dimension of the supplied wind + array. + + Parameters + ---------- + wind: 2 or 3D array + Multi-level wind target data + dtime: integer, default 1 + Number of time dimension values + height: float, default None + Value for height in metres for zeroth slice of wind + aslist: boolean, default False + Make wind cube into a CubeList of height slices or not. 
+ + """ + if aslist: + self.w_cube = iris.cube.CubeList() + for windfield in wind: + windfield = np.array(windfield) + if windfield.ndim == 1: # only function of height + windfield = np.ones( + windfield.shape+(1, self.n_x, self.n_y) + )*windfield.reshape(windfield.shape+(1, 1, 1)) + self.w_cube.append(set_up_cube( + 1, [self.n_x, self.n_y], windfield.shape[0], + data=windfield, name="wind_speed", unit="m s-1", + height=height)) + else: + wind = np.array(wind) + self.w_cube = iris.cube.Cube + if wind.ndim == 1: # only function of height + wind = np.ones( + wind.shape+(dtime, self.n_x, self.n_y) + )*wind.reshape(wind.shape+(1, 1, 1)) + elif wind.ndim == 2: # function of height and time + wind = np.ones( + wind.shape+(self.n_x, self.n_y) + )*wind.reshape(wind.shape+(1, 1)) + self.w_cube = set_up_cube( + dtime, [self.n_x, self.n_y], wind.shape[0], data=wind, + name="wind_speed", unit="m s-1", height=height) + plugin = RoughnessCorrection( + self.aos_cube, self.s_cube, self.poro_cube, + self.moro_cube, 1500., self.z0_cube + ) + return plugin.process(self.w_cube) + + +class TestSinglePoint(object): + """Test a single 1x1 grid. + + A cube is a single 1x x 1y grid, however, the z dimension is not 1. + It constructs 1x1 cubes for the ancillary fields Silhouette + roughness (AoS) and standard deviation of model height grid cell + (Sigma), vegetative roughness (z_0), post-processing grid orography + (pporog) and model orography(modelorog). If no values are supplied, + the values are: AoS = 0.2, Sigma = 20, z_0 = 0.2, pporog = 250, + modelorog = 230. + + The height level grid (heightlevels) can be supplied as an 1D + array. If nothing is supplied, the height level grid is [0.2, 3, + 13, 33, 133, 333, 1133]. + + """ + + def __init__(self, AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250., + modelorog=230., heightlevels=np.array([0.2, 3., 13., 33., + 133., 333., 1133.])): + """Set up the single point test for RoughnessCorrection. + + Parameters + ---------- + + AoS: float + Silhouette roughness field + Sigma: float + Standard deviation field of height in grid cell + z_0: float + Vegetative roughness field + pporog: float + Unsmoothed orography on post-processing grid + modelorog: float + Model orography on post-processing grid + heightlevels: 1D np.array + Height level array + + """ + self.w_cube = None + self.aos_cube = set_up_cube(1, 1, 1, data=AoS, name=None, unit=None) + self.s_cube = set_up_cube(1, 1, 1, data=Sigma, name=None, unit="m") + if z_0 is None: + self.z0_cube = None + else: + self.z0_cube = set_up_cube(1, 1, 1, data=z_0, name=None, unit="m") + self.poro_cube = set_up_cube(1, 1, 1, data=pporog, name=None, + unit="m") + self.moro_cube = set_up_cube(1, 1, 1, data=modelorog, name=None, + unit="m") + if heightlevels is not None: + self.hl_cube = set_up_cube(1, 1, len(heightlevels), + data=heightlevels) + else: + self.hl_cube = None + + def test_hc_rc(self, wind, height=None): + """Test single point height correction and roughness correction. + + Make an iris cube of the supplied wind and set up the height + axis in m. 
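
The 1D-profile broadcast described in the docstrings above can be exercised on its own. A usage sketch, assuming the set_up_cube helper and the OSGBGRID grid defined earlier in this patch are importable: a three-level profile is copied to every time step and grid point before the cube is built.

    import numpy as np

    profile = np.array([10., 12., 14.])      # wind speed per height level
    dtime, n_x, n_y = 2, 3, 3
    wind = np.ones(profile.shape + (dtime, n_x, n_y)) * profile.reshape(
        profile.shape + (1, 1, 1))
    assert wind.shape == (3, 2, 3, 3)        # (height, time, x, y)
    cube = set_up_cube(dtime, [n_x, n_y], wind.shape[0], data=wind,
                       name="wind_speed", unit="m s-1")
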
+
+        Parameters
+        ----------
+        wind: 1 or 2D array
+            Array of wind speeds
+        height: float, default None
+            Value for height in metres for zeroth slice of wind
+
+        """
+        wind = np.array(wind)
+        if wind.ndim == 1:
+            wind = wind.reshape([1, 1, wind.shape[0]])
+        elif wind.ndim == 2:
+            wind = wind.reshape([wind.shape[0], 1, wind.shape[1]])
+        self.w_cube = set_up_cube(wind.shape[0], 1, wind.shape[2],
+                                  data=np.rollaxis(wind, 2, start=0),
+                                  name="wind_speed", unit="m s-1",
+                                  height=height)
+        plugin = RoughnessCorrection(
+            self.aos_cube, self.s_cube, self.poro_cube, self.moro_cube,
+            1500., self.z0_cube, self.hl_cube
+        )
+        return plugin.process(self.w_cube)
+
+
+class Test1D(IrisTest):
+
+    """Class to test 1 x-y point cubes.
+
+    This class tests the correct behaviour if np.nan or RMDI are
+    passed, as well as testing the general behaviour of points that
+    should not have a height correction (equal height in model and pp
+    orography) and the correct behaviour of doing roughness correction,
+    depending on whether or not a vegetative roughness (z_0) cube is
+    provided.
+
+    Section 0 contains tests where RMDI or np.nan values are passed.
+    Section 1 contains sensible single point tests.
+
+    """
+    uin = [20., 20., 20., 20., 20., 20., 20.]
+    hls = [0.2, 3, 13, 33, 133, 333, 1133]
+
+    def test_section0a(self):
+        """Test AoS is RMDI, point should not do anything, uin = uout."""
+        landpointtests_hc_rc = TestSinglePoint(
+            AoS=RMDI, Sigma=20.0, z_0=0.2, pporog=250., modelorog=230.,
+            heightlevels=self.hls)
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc)
+
+    def test_section0b(self):
+        """Test AoS is np.nan, point should not do anything, uin = uout."""
+        landpointtests_hc_rc = TestSinglePoint(
+            AoS=np.nan, Sigma=20.0, z_0=0.2, pporog=250., modelorog=230.,
+            heightlevels=self.hls)
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc)
+
+    def test_section0c(self):
+        """Test Sigma is RMDI, point should not do anything, uin = uout."""
+        landpointtests_hc_rc = TestSinglePoint(
+            AoS=0.2, Sigma=RMDI, z_0=0.2, pporog=250., modelorog=230.,
+            heightlevels=self.hls)
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc)
+
+    def test_section0d(self):
+        """Test Sigma is np.nan, point should not do anything, uin = uout."""
+        landpointtests_hc_rc = TestSinglePoint(
+            AoS=0.2, Sigma=np.nan, z_0=0.2, pporog=250., modelorog=230.,
+            heightlevels=self.hls)
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc)
+
+    def test_section0e(self):
+        """Test z_0 is RMDI, point should not do RC.
+
+        modeloro = pporo, so point should not do HC, uin = uout.
+
+        """
+        landpointtests_hc_rc = TestSinglePoint(
+            AoS=0.2, Sigma=20.0, z_0=RMDI, pporog=230., modelorog=230.,
+            heightlevels=self.hls
+        )
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc)
+
+    def test_section0f(self):
+        """Test z_0 is np.nan, point should not do RC.
+
+        modeloro = pporo, so point should not do HC, uin = uout.
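
The pass-through behaviour asserted throughout section 0 follows from the plugin's masking step: an RMDI or np.nan ancillary value leaves the mask False at that point, so no correction is attempted there. A minimal sketch of that logic for a single assumed point, mirroring the mask construction in wind_downscaling.py:

    import numpy as np

    RMDI = -32767.0
    a_over_s = np.array([[RMDI]])
    hcmask = np.full(a_over_s.shape, True, dtype=bool)
    hcmask[a_over_s <= 0] = False       # RMDI is negative, so the point drops out
    hcmask[np.isnan(a_over_s)] = False
    assert not hcmask.any()             # nothing is corrected: uout == uin
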
+ + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=np.nan, pporog=230., modelorog=230., + heightlevels=self.hls + ) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc) + + def test_section0g(self): + """Test z_0 is RMDI, point should not do RC. + + modeloro < pporo, so point should do positive HC, uin < uout. + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=RMDI, pporog=250., modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data > + landpointtests_hc_rc.w_cube.data).all()) + + def test_section0h(self): + """Test pporog is RMDI (QUESTION: or should this fail???) + + RC could be done for this point, HC cannot. + uin >= uout + and since z_0=height[0] + uout[0] = 0 + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=RMDI, modelorog=230., + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_hc_rc.w_cube.data).all() and + land_hc_rc.data[0] == 0) + + def test_section0i(self): + """Test pporog is np.nan (QUESTION: or should this fail???) + + RC could be done for this point, HC cannot. + uin >= uout + and since z_0=height[0] + uout[0] = 0 + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=np.nan, modelorog=230., + heightlevels=self.hls + ) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_hc_rc.w_cube.data).all() and + land_hc_rc.data[0] == 0) + + def test_section0j(self): + """Test modelorog is RMDI (QUESTION: or should this fail???). + + RC could be done for this point, HC cannot. + uin >= uout + and since z_0=height[0] + uout[0] = 0 + + """ + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250., modelorog=RMDI, + heightlevels=self.hls) + land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin) + self.failUnless((land_hc_rc.data <= + landpointtests_hc_rc.w_cube.data).all() and + land_hc_rc.data[0] == 0) + + def test_section0k(self): + """Test fail for RMDI in height grid. + + height grid is RMDI at that location somewhere in z-direction, + should fail with ValueError. + + """ + hls = [0.2, 3, 13, RMDI, 133, 333, 1133] + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230, + heightlevels=hls) + with self.assertRaises(ValueError): + _ = landpointtests_hc_rc.test_hc_rc(self.uin) + + def test_section0l(self): + """Test fail for np.nan in height grid. + + height grid is np.nan at that location somewhere in z-direction, + should fail with ValueError. + + """ + hls = [0.2, 3, 13, np.nan, 133, 333, 1133] + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230, + heightlevels=hls) + with self.assertRaises(ValueError): + _ = landpointtests_hc_rc.test_hc_rc(self.uin) + + def test_section0m(self): + """Test fail for RMDI in uin. + + uin is RMDI at that location somewhere in z-direction, + should fail with ValueError. + + """ + uin = [20., 20., 20., RMDI, RMDI, 20., 0.] + landpointtests_hc_rc = TestSinglePoint( + AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230, + heightlevels=self.hls + ) + with self.assertRaises(ValueError): + _ = landpointtests_hc_rc.test_hc_rc(uin) + + def test_section0n(self): + """Test fail for np.nan in uin. 
+
+        uin is np.nan at that location somewhere in z-direction,
+        should fail with ValueError.
+
+        """
+        uin = [20., 20., 20., np.nan, 20., 20., 20.]
+        landpointtests_hc_rc = TestSinglePoint(
+            AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250, modelorog=230,
+            heightlevels=self.hls
+        )
+        with self.assertRaises(ValueError):
+            _ = landpointtests_hc_rc.test_hc_rc(uin)
+
+    def test_section1a(self):
+        """Test HC only, HC = 0.
+
+        z_0 passed as None, hence RC not performed.
+        modelorog = pporog, hence HC = 0.
+        uin = uout
+
+        """
+        landpointtests_hc = TestSinglePoint(
+            z_0=None, pporog=250., modelorog=250.)
+        land_hc_rc = landpointtests_hc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc.w_cube, land_hc_rc)
+
+    def test_section1b(self):
+        """Test HC only.
+
+        z_0 passed as None, hence RC not performed.
+        modelorog < pporog, hence positive HC.
+        uin <= uout, at least one height has uin < uout.
+
+        """
+        landpointtests_hc = TestSinglePoint(
+            z_0=None, pporog=250., modelorog=230.)
+        land_hc_rc = landpointtests_hc.test_hc_rc(self.uin)
+        self.failUnless((land_hc_rc.data >=
+                         landpointtests_hc.w_cube.data).all() and
+                        (land_hc_rc.data >
+                         landpointtests_hc.w_cube.data).any())
+
+    def test_section1c(self):
+        """Test RC and HC, HC=0.
+
+        z_0 passed, hence RC performed.
+        modelorog == pporog, hence no HC.
+        uin >= uout, at least one height has uin > uout, uout[0] = 0.
+
+        """
+        landpointtests_rc = TestSinglePoint(
+            z_0=0.2, pporog=250., modelorog=250.)
+        land_hc_rc = landpointtests_rc.test_hc_rc(self.uin)
+        self.failUnless((land_hc_rc.data <=
+                         landpointtests_rc.w_cube.data).all() and
+                        (land_hc_rc.data <
+                         landpointtests_rc.w_cube.data).any() and
+                        land_hc_rc.data[0] == 0)
+
+    def test_section1d(self):
+        """Test RC and HC.
+
+        z_0 passed, hence RC performed.
+        modelorog >> pporog, hence negative HC.
+        uin >= uout, at least one height has uin > uout
+        z_0 = height[0] hence RC[0] results in 0.
+        uout[0] RC is 0. HC is negative, negative speeds not allowed.
+        Must be 0.
+
+        """
+        landpointtests_hc_rc = TestSinglePoint(
+            z_0=0.2, pporog=230., modelorog=250.)
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.failUnless((land_hc_rc.data <=
+                         landpointtests_hc_rc.w_cube.data).all() and
+                        (land_hc_rc.data <
+                         landpointtests_hc_rc.w_cube.data).any() and
+                        (land_hc_rc.data >= 0).all() and
+                        land_hc_rc.data[0] == 0)
+
+    def test_section1e(self):
+        """Test RC and HC, but sea point masked out (AoS).
+
+        Sea point according to (AoS=0) => masked out.
+        z_0 passed, hence RC performed in theory.
+        uin = uout.
+
+        """
+        landpointtests_hc_rc = TestSinglePoint(
+            z_0=0.2, AoS=0.)
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc)
+
+    def test_section1f(self):
+        """Test RC and HC, but sea point masked out (Sigma).
+
+        Sea point according to (Sigma=0) => masked out.
+        z_0 passed, hence RC performed in theory.
+        uin = uout.
+
+        """
+        landpointtests_hc_rc = TestSinglePoint(z_0=0.2, Sigma=0.)
+        land_hc_rc = landpointtests_hc_rc.test_hc_rc(self.uin)
+        self.assertArrayEqual(landpointtests_hc_rc.w_cube, land_hc_rc)
+
+
+class Test2D(IrisTest):
+
+    """Test multi-point wind corrections.
+
+    Section 2 contains multiple point, multiple time tests.
+    Section 3 contains tests that should fail because the grids are not
+    all the same or units are wrong.
+
+    """
+    uin = [20., 20., 20., 20., 20., 20., 20.]
+    hls = [0.2, 3, 13, 33, 133, 333, 1133]
+
+    def test_section2a(self):
+        """Test multiple points.
+ + All points should have equal u profile hence all points in a + slice over height should be equal. + + """ + hlvs = 10 + uin = np.ones(hlvs)*20 + heights = ((np.arange(hlvs)+1)**2.)*12. + multip_hc_rc = TestMultiPoint(3) + land_hc_rc = multip_hc_rc.test_hc_rc(uin, dtime=1, height=heights) + hidx = land_hc_rc.shape.index(hlvs) + for field in land_hc_rc.slices_over(hidx): + self.failUnless((field.data == field.data[0, 0]).all()) + + def test_section2b(self): + """Test a mix of sea and land points over multiple timesteps. + + p1: sea point, no correction + p2: land point, equal height, RC (<=uin), no HC + p3: land point, model=p2 everywhere + Two time steps tested, should be equal. + + """ + uin = np.ones(10)*20 + heights = ((np.arange(10)+1)**2.)*12 + multip_hc_rc = TestMultiPoint( + nx_ny=[3, 1], AoS=[0, 0.2, 0.2], pporog=[0, 250, 250], + modelorog=[0, 250, 230]) + land_hc_rc = multip_hc_rc.test_hc_rc(uin, dtime=2, height=heights) + tidx = land_hc_rc.shape.index(2) + time1 = land_hc_rc.data.take(0, axis=tidx) + time2 = land_hc_rc.data.take(1, axis=tidx) + # Check on time. + self.assertArrayEqual(time1, time2) + xidxnew = land_hc_rc.shape.index(3) + xidxold = multip_hc_rc.w_cube.data.shape.index(3) + landp1new = land_hc_rc.data.take(0, axis=xidxnew) + landp1old = multip_hc_rc.w_cube.data.take(0, axis=xidxold) + # Check on p1. + self.assertArrayEqual(landp1new, landp1old) + landp2new = land_hc_rc.data.take(1, axis=xidxnew) + landp2old = multip_hc_rc.w_cube.data.take(1, axis=xidxold) + # Check on p2. + self.failUnless((landp2new <= landp2old).all() and + (landp2new < landp2old).any()) + landp3new = land_hc_rc.data.take(2, axis=xidxnew) + # Check on p3. + self.failUnless((landp2new <= landp3new).all() and + (landp2new < landp3new).any()) + + def test_section2c(self): + """As test 2b, but passing the two time steps in a list. + + timesteps are a list rather than a 4D cube. This should raise + an error. + + """ + uin = np.ones(10)*20 + heights = ((np.arange(10)+1)**2.)*12 + multip_hc_rc = TestMultiPoint( + nx_ny=[3, 1], AoS=[0, 0.2, 0.2], pporog=[0, 250, 250], + modelorog=[0, 250, 230] + ) + msg = "wind input is not a cube, but " + with self.assertRaisesRegexp(ValueError, msg): + _ = multip_hc_rc.test_hc_rc([uin, uin], dtime=2, height=heights, + aslist=True) + + def test_section3a(self): + """As test 1c, however with manipulated z_0 cube. + + All ancillary fields have 1x1 dim, z_0 is on a different grid. + This should fail with ValueError("ancillary grids are not + consistent"). + + """ + landpointtests_rc = TestSinglePoint( + z_0=0.2, pporog=250., modelorog=250.) + landpointtests_rc.z0_cube = set_up_cube( + 1, [1, 2], 1, data=np.array([landpointtests_rc.z0_cube.data, + landpointtests_rc.z0_cube.data]), + height=0, name=None, unit="m") + msg = "ancillary grids are not consistent" + with self.assertRaisesRegexp(ValueError, msg): + _ = landpointtests_rc.test_hc_rc(self.uin) + + def test_section3b(self): + """As test 3a, however with manipulated modelorog cube instead. + + This should fail with ValueError("ancillary grids are not + consistent"). + + """ + landpointtests_rc = TestSinglePoint( + z_0=0.2, pporog=250., modelorog=250.) 
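
The axis bookkeeping in test_section2b works because numpy's take pulls a slice out of whichever dimension the merged cube put a coordinate on, with the axis located by its length. The pattern in isolation, with an assumed (time, x, height) array:

    import numpy as np

    data = np.arange(24).reshape(2, 3, 4)   # assumed (time, x, height)
    tidx = data.shape.index(2)              # locate the time axis by length
    time1 = data.take(0, axis=tidx)
    time2 = data.take(1, axis=tidx)
    assert time1.shape == time2.shape == (3, 4)
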
+        landpointtests_rc.moro_cube = set_up_cube(
+            1, [1, 2], 1, data=np.array([landpointtests_rc.moro_cube.data,
+                                         landpointtests_rc.moro_cube.data]),
+            height=0, name=None, unit="m")
+        msg = "ancillary grids are not consistent"
+        with self.assertRaisesRegexp(ValueError, msg):
+            _ = landpointtests_rc.test_hc_rc(self.uin)
+
+    def test_section3c(self):
+        """As test 3a, however with manipulated z_0 units.
+
+        This should fail with a wrong units error.
+
+        """
+        landpointtests_rc = TestSinglePoint(
+            z_0=0.2, pporog=250., modelorog=250.)
+        landpointtests_rc.z0_cube.units = Unit('s')
+        msg = ("z0 ancil has unexpected unit: should be {} "
+               "is {}")
+        with self.assertRaisesRegexp(
+                ValueError, msg.format(Unit('m'),
+                                       landpointtests_rc.z0_cube.units)):
+            _ = landpointtests_rc.test_hc_rc(self.uin)
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py
new file mode 100644
index 0000000000..53de10fc15
--- /dev/null
+++ b/lib/improver/wind_downscaling.py
@@ -0,0 +1,964 @@
+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Module containing wind downscaling plugins."""
+
+import copy
+import itertools
+
+from cf_units import Unit
+import iris
+import numpy as np
+
+from improver.constants import RMDI
+
+# Scale parameter to determine reference height
+ABSOLUTE_CORRECTION_TOL = 0.04
+
+# Scaling parameter to determine reference height
+HREF_SCALE = 2.0
+
+# Von Karman's constant
+VONKARMAN = 0.4
+
+# Default roughness length for sea points
+Z0M_SEA = 0.0001
+
+
+class FrictionVelocity(object):
+
+    """Class to calculate the friction velocity.
+
+    This holds the function to calculate the friction velocity u_star,
+    given a reference height h_ref, the velocity at the reference
+    height u_href and the surface roughness z_0.
+
+    """
+
+    def __init__(self, u_href, h_ref, z_0, mask):
+        """Initialise the class.
+ + Parameters: + ----------- + u_href: 2D np.array (float) + wind speed at h_ref + h_ref: 2D np.array (float) + reference height + z_0: 2D np.array (float) + vegetative roughness length + mask: 2D np.array (logical) + where True, calculate u* + + comments: + * z_0 and h_ref need to have identical units. + * the calculated friction velocity will have the units of the + supplied velocity u_href. + + """ + self.u_href = u_href + self.h_ref = h_ref + self.z_0 = z_0 + self.mask = mask + + def calc_ustar(self): + """Function to calculate the friction velocity. + + Returns: + -------- + ustar: 2D array (float) + friction velocity + + """ + ustar = np.ones(self.u_href.shape) * RMDI + ustar[self.mask] = VONKARMAN * (self.u_href[self.mask]/np.log + (self.h_ref[self.mask] / + self.z_0[self.mask])) + return ustar + + +class RoughnessCorrectionUtilities(object): + + """Class to calculate the height and roughness wind corrections. + + This holds functions to calculate the roughness and height + corrections given the ancil files: + * standard deviation of hight in grid cell as sigma (model grid on pp grid) + * Silhouette roughness as a_over_s (model grid on pp grid) + * vegetative roughness length z_0 (model grid on pp grid) + * post-processing grid orography pporo + * model grid orography interpolated on post-processing grid modoro + * height level 3D/ 1D grid + and + * windspeed 3D field on height level 3D grid (from above). + + """ + + def __init__(self, a_over_s, sigma, z_0, pporo, modoro, ppres, modres): + """Set up roughness and height correction. + + This sets up the parameters used for roughness and height + correction given the ancillary file inputs: + + Parameters: + ---------- + a_over_s: 2D array (float) + Silhouette roughness field, dimensionless ancillary data, + calculated according to Robinson (2008) + sigma: 2D array (float) + Standard deviation field of height in the grid cell, units + of length + z_0: 2D array (float) + Vegetative roughness height field, units of length + pporo: 2D array (float) + Post processing grid orography field + modoro: 2D array (float) + Model orography field interpolated to post processing grid + ppres: scalar (float) + Grid cell length of post processing grid + modres: scalar (float) + Grid cell length of model grid + + """ + self.a_over_s = a_over_s + self.z_0 = z_0 + if z_0 is None: + self.l_no_winddownscale = True + else: + self.l_no_winddownscale = False + self.pporo = pporo + self.modoro = modoro + self.h_over_2 = self.sigma2hover2(sigma) # half peak to trough height + self.hcmask, self.rcmask = self.setmask() # HC mask, RC mask + if not self.l_no_winddownscale: + self.z_0[z_0 <= 0] = Z0M_SEA + self.dx_min = ppres/2. # scales smaller than this not resolved in pp + # the original code had hardcoded 500 + self.dx_max = 3.*modres # scales larger than this resolved in model + # the original code had hardcoded 4000 + self.wavenum = self.calc_wav() # k = 2*pi / L + self.h_ref = self.calc_h_ref() + self.refinemask() # HC mask needs to be updated for missing orography + self.h_at0 = self.delta_height() # pp orography - model orography + + def refinemask(self): + """Remask over RMDI and NaN orography. + + The mask for HC needs to be False where either of the + orographies (model or pp) has an invalid number. This cannot be + done before because the mask is used to calculate the + wavenumber which can and should be calculated for all points + where h_over_2 and a_over_s is a valid number. 
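
Putting representative numbers through the friction-velocity relation implemented in calc_ustar above (u* = K * u_href / ln(h_ref / z_0)) gives a feel for the magnitudes involved; the values below are assumed for illustration:

    import numpy as np

    VONKARMAN = 0.4
    u_href, h_ref, z_0 = 10.0, 20.0, 0.2     # m s-1, m, m
    ustar = VONKARMAN * u_href / np.log(h_ref / z_0)
    print(round(ustar, 3))                   # ~0.869 m s-1
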
+ + """ + self.hcmask[self.pporo == RMDI] = False + self.hcmask[self.modoro == RMDI] = False + self.hcmask[np.isnan(self.pporo)] = False + self.hcmask[np.isnan(self.modoro)] = False + + def setmask(self): + """Create a ~land-sea mask. + + Create a mask that is basically a land-sea mask: + Both, the standard deviation and the silouette roughness, are 0 + over the sea. A standard deviation of 0 results in a RMDI for + h_over_2. + + Returns: + ------- + hcmask: 2D array (logical) + True for land-points, false for Sea (HC) + rcmask: 2D array (logical) + additionally False for invalid z_0 (RC) + + """ + hcmask = np.full(self.h_over_2.shape, True, dtype=bool) + hcmask[self.h_over_2 <= 0] = False + hcmask[self.a_over_s <= 0] = False + hcmask[np.isnan(self.h_over_2)] = False + hcmask[np.isnan(self.a_over_s)] = False + rcmask = np.copy(hcmask) + if not self.l_no_winddownscale: + rcmask[self.z_0 <= 0] = False + rcmask[np.isnan(self.z_0)] = False + return hcmask, rcmask + + @staticmethod + def sigma2hover2(sigma): + """Calculate the half peak-to-trough height. + + The ancillary data used to estimate the peak to trough height + contains the standard deviation of height in a cell. For + sine-waves, this relates to the amplitude of the wave as: + + Amplitude = sigma * sqrt(2) + + The amplitude would correspond to half the peak-to-trough + height (h_o_2). + + Parameters: + ----------- + sigma: 2D array + standard deviation of height in grid cell. + + Returns: + -------- + h_o_2: 2D array + of half peak-to-trough height. + + Comments: + Points that had sigma = 0 (i.e. sea points) are set to + RMDI. + + """ + h_o_2 = np.ones(sigma.shape) * RMDI + h_o_2[sigma > 0] = sigma[sigma > 0] * np.sqrt(2.0) + return h_o_2 + + def calc_wav(self): + """Calculate wavenumber k of typical orographic lengthscale. + + Function to calculate wavenumber k of typical orographic + lengthscale L: + k = 2*pi / L (1) + + L is approximated from half the peak-to-trough height h_over_2 + and the silhoutte roughness a_over_s (average of up-slopes per + unit length over several cross-sections through a grid cell) + as: + L = 2*h_over_2 / a_over_s (2) + + a_over_s is dimensionless since it is the sum of up-slopes + measured in the same unit lengths as it is calculated over. + + h_over_2 is calculated from the standard deviation of height in + a grid cell, sigma, as: + h_over_2 = sqrt(2) * sigma + + which is based on the assumptions of sine waves, see + sigma2hover2. + + From eq. (1) and (2) it follows that: + k = 2*pi / (2*h_over_2 / a_over_s) + = a_over_s * pi / h_over_2 + + Returns: + -------- + wavn: 2D np.array + wavenumber in units of inverse units of supplied h_over_2. + + """ + wavn = np.ones(self.a_over_s.shape) * RMDI + wavn[self.hcmask] = (self.a_over_s[self.hcmask] / + self.h_over_2[self.hcmask]*np.pi) + wavn[wavn > np.pi/self.dx_min] = np.pi/self.dx_min + wavn[self.h_over_2 == 0] = RMDI + wavn[abs(wavn) < np.pi/self.dx_max] = np.pi/self.dx_max + return wavn + + def calc_h_ref(self): + """Calculate the reference height for roughness correction. + + The reference height below which the flow is in equilibrium + with the vegetative roughness is proportional to 1/wavenum + (Howard & Clark, 2007). + + Vosper (2009) and Clark (2009) argue that at the reference + height, the perturbation should have decayed to a fraction + epsilon (ABSOLUTE_CORRECTION_TOL). The factor alpha + implements eq. 1.3 in Clark (2009): UK Climatology - Wind + Screening Tool. See also Vosper (2009) for a motivation. 
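
Chaining sigma2hover2 and calc_wav with the default ancillary values used throughout the tests (sigma = 20 m, a_over_s = 0.2) shows the typical scales; a worked example with those assumed inputs:

    import numpy as np

    sigma, a_over_s = 20.0, 0.2
    h_over_2 = sigma * np.sqrt(2.0)          # ~28.3 m half peak-to-trough
    wavenum = a_over_s * np.pi / h_over_2    # k ~0.022 per metre
    length_scale = 2. * np.pi / wavenum      # L = 2*h_over_2/a_over_s ~283 m
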
+ + alpha is the log of scale parameter to determine reference + height which is currently set to 0.04 (this corresponds to + epsilon in both Vosper and Clark) + + Returns: + -------- + h_ref: 2D np.array (float) + reference height for roughness correction + + """ + alpha = -np.log(ABSOLUTE_CORRECTION_TOL) + aparam = np.ones(self.wavenum.shape) * RMDI + h_ref = np.ones(self.wavenum.shape) * RMDI + aparam[self.hcmask] = alpha + np.log(self.wavenum[self.hcmask] * + self.h_over_2[self.hcmask]) + aparam[aparam > 1.0] = 1.0 + aparam[aparam < 0.0] = 0.0 + h_ref[self.hcmask] = aparam[self.hcmask] / self.wavenum[self.hcmask] + h_ref[h_ref < 1.0] = 1.0 + h_ref = np.minimum(h_ref, HREF_SCALE*self.h_over_2) + h_ref[h_ref < 1.0] = 1.0 + h_ref[~self.hcmask] = 0.0 + return h_ref + + def roughness_correction_sub(self, hgrid, uold, mask): + """Function to perform the roughness correction. + + Parameters: + ---------- + hgrid: 3D or 1D np.array (float) + height above orography + uold: 3D np.array (float) + original velocities at hgrid. + + Returns: + -------- + unew: 3D np.array (float) + Corrected wind speed on hgrid. Above href, this is + equal to uold. + + Comments: + Replace the windspeed profile below the reference height with one + that increases logarithmic with height, bound by the original + velocity uhref at the reference height h_ref and by a 0 velocity at + the vegetative roughness height z_0 + + """ + uhref = self.calc_u_at_h(uold, hgrid, self.h_ref, mask) + if hgrid.ndim == 1: + hgrid = hgrid.reshape((1, 1, )+(hgrid.shape[0],)) + ustar = FrictionVelocity(uhref, self.h_ref, self.z_0, + mask).calc_ustar() + unew = np.copy(uold) + mhref = self.h_ref + mhref[~mask] = RMDI + cond = hgrid < (self.h_ref).reshape(self.h_ref.shape+(1,)) + unew[cond] = ( + ustar.reshape(ustar.shape+(1,))*np.ones(unew.shape) + )[cond]*(np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) + * np.ones(unew.shape)))[cond])/VONKARMAN + return unew + + def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): + """Function to interpolate u_in on h_in at hhere. + + Parameters: + ---------- + u_in: 3D array (float) + velocity on h_in layer, last dim is height + h_in: 3D or 1D array (float) + height layer array + hhere: 2D array (float) + height grid to interpolate at + (dolog: scalar (logial) + if True, log interpolation, default False) + + Returns: + ------- + uath: 2D array (float) + velocity interpolated at h + + """ + u_in = np.ma.masked_less(u_in, 0.0) + h_in = np.ma.masked_less(h_in, 0.0) + # h_in.mask = u_in.mask + # If I allow 1D height grids, I think I cannot do the hop over. 
+ + # Ignores the height at the position where u_in is RMDI,"hops over" + hhere = np.ma.masked_less(hhere, 0.0) + upidx = np.argmax(h_in > hhere.reshape(hhere.shape+(1,)), axis=2) + # loidx = np.maximum(upidx-1, 0) #if RMDI, need below + loidx = np.argmin(np.ma.masked_less(hhere.reshape(hhere.shape+(1,)) - + h_in, 0.0), axis=2) + + if h_in.ndim == 3: + hup = h_in.take(upidx.flatten()+np.arange(0, upidx.size + * h_in.shape[2], + h_in.shape[2])) + hlow = h_in.take(loidx.flatten()+np.arange(0, loidx.size + * h_in.shape[2], + h_in.shape[2])) + elif h_in.ndim == 1: + hup = h_in[upidx].flatten() + hlow = h_in[loidx].flatten() + uup = u_in.take(upidx.flatten()+np.arange(0, upidx.size*u_in.shape[2], + u_in.shape[2])) + ulow = u_in.take(loidx.flatten()+np.arange(0, loidx.size*u_in.shape[2], + u_in.shape[2])) + mask = mask.flatten() + uath = np.full(mask.shape, RMDI, dtype=float) + if dolog: + uath[mask] = self.loginterpol(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) + else: + uath[mask] = self.interp1d(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) + uath = np.reshape(uath, hhere.shape) + return uath + + @staticmethod + def interp1d(xup, xlow, at_x, yup, ylow): + """Simple 1D linear interpolation for 2D grid inputs level. + + Parameters: + ---------- + xlow: 2D np.array (float) + lower x-bins + xup: 2D np.array (float) + upper x-bins + at_x: 2D np.array (float) + x values to interpolate y at + yup: 2D np.array(float) + y(xup) + ylow: 2D np.array (float) + y(xlow) + + Returns: + ------- + interp: 2D np.array (float) + y(at_x) assuming a lin function between xlow and xup + + """ + interp = np.full(xup.shape, RMDI, dtype=float) + diffs = (xup - xlow) + interp[diffs != 0] = ( + ylow[diffs != 0]+((at_x[diffs != 0]-xlow[diffs != 0]) / + diffs[diffs != 0]*(yup[diffs != 0] + - ylow[diffs != 0]))) + interp[diffs == 0] = at_x[diffs == 0]/xup[diffs == 0]*(yup[diffs == 0]) + return interp + + @staticmethod + def loginterpol(x_u, x_l, at_x, y_u, y_l): + """Simple 1D log interpolation y(x), except if lowest layer is + ground level. + + Parameters: + ---------- + x_l: 2D np.array (float) + lower x-bins + x_u: 2D np.array (float) + upper x-bins + at_x: 2D np.array (float) + x values to interpolate y at + y_u: 2D np.array (float) + y(x_u) + y_l: 2D np.array (float) + y(x_l) + + Returns: + ------- + loginterp: 2D np.array (float) + y(at_x) assuming a log function between x_l and x_u + + """ + ain = np.full(x_u.shape, RMDI, dtype=float) + loginterp = np.full(x_u.shape, RMDI, dtype=float) + mfrac = x_u/x_l + mtest = (x_u/x_l != 1) & (at_x != x_u) + ain[mtest] = (y_u[mtest] - y_l[mtest])/np.log(mfrac[mtest]) + loginterp[mtest] = ain[mtest]*np.log(at_x[mtest]/x_u[mtest])+y_u[mtest] + mtest = (x_u/x_l == 1) # below lowest layer, make lin interp + loginterp[mtest] = at_x[mtest]/x_u[mtest] * (y_u[mtest]) + mtest = (at_x == x_u) # just use y_u + loginterp[mtest] = y_u[mtest] + return loginterp + + def height_corr_sub(self, u_a, heightg, mask, onemfrac): + """Function to calculate the additive height correction. + + Parameters: + ---------- + u_a: 2D array (float) + outer velocity, e.g. velocity at h_ref_orig + heightg: 1D or 3D array + heights above orography + onemfrac: currently, scalar = 1. + In principle, it is a function of position and height, e.g. 
+ a 3D array (float) + + Returns: + ------- + hc_add: 3D array (float) + additive height correction to wind speed + + Comments: + The height correction is a disturbance of the flow that + decays exponentially with height. The larger the vertical + offset h_at0 (the higher the unresolved hill), the larger + is the disturbance. + + The more smooth the distrubance (the larger the horizontal + scale of the disturbance), the smaller the height + correction (hence, a larger wavenumber results in a larger + disturbance). + hc_add = exp(-height*wavenumber)*u(href)*h_at_0*k + + """ + (xdim, ydim) = u_a.shape + if heightg.ndim == 1: + zdim = heightg.shape[0] + heightg = heightg.reshape((1, 1, zdim)) + elif heightg.ndim == 3: + zdim = heightg.shape[2] + ml2 = self.h_at0*self.wavenum + expon = np.ones([xdim, ydim, zdim]) + mult = (self.wavenum).reshape(self.wavenum.shape+(1,))*heightg + expon[mult > 0.0001] = np.exp(-mult[mult > 0.0001]) + hc_add = ( + expon*u_a.reshape(u_a.shape+(1,)) * + ml2.reshape(ml2.shape+(1,))*onemfrac) + hc_add[~mask, :] = 0 + return hc_add + + def delta_height(self): + """Function to calculate pp-grid diff from model grid. + + Calculate the difference between pp-grid height and model + grid height. + + Returns: + ------- + deltZ: 2D np.array (float) + height difference, ppgrid-model + + """ + delt_z = np.ones(self.pporo.shape) * RMDI + delt_z[self.hcmask] = self.pporo[self.hcmask]-self.modoro[self.hcmask] + return delt_z + + def do_rc_hc_all(self, hgrid, uorig): + """Function to call HC and RC (height and roughness corrections). + + Parameters: + ---------- + hgrid: 1D or 3D array (float) + height grid of wind input + uorig: 3D array (float) + wind speed on these levels + + Returns: + ------- + result: 3D array + sum of unew: 3D array (float) RC corrected windspeed + on levels HC: 3D array (float) HC additional part + + """ + if hgrid.ndim == 3: + condition1 = ((hgrid == RMDI).any(axis=2)) + self.hcmask[condition1] = False + self.rcmask[condition1] = False + mask_rc = np.copy(self.rcmask) + mask_rc[(uorig == RMDI).any(axis=2)] = False + mask_hc = np.copy(self.hcmask) + mask_hc[(uorig == RMDI).any(axis=2)] = False + if not self.l_no_winddownscale: + unew = self.roughness_correction_sub(hgrid, uorig, mask_rc) + else: + unew = uorig + uhref_orig = self.calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc) + mask_hc[uhref_orig <= 0] = False + onemfrac = 1.0 + # onemfrac = 1.0 - BfuncFrac(nx,ny,nz,heightvec,z_0,waveno, Ustar, UI) + hc_add = self.height_corr_sub(uhref_orig, hgrid, mask_hc, onemfrac) + result = unew + hc_add + result[result < 0.] = 0 # HC can be negative if pporo Date: Wed, 19 Apr 2017 09:00:03 +0100 Subject: [PATCH 0019/1367] Minor style corrections --- ...st_wind_downscaling_roughnesscorrection.py | 59 +++++++++---------- lib/improver/wind_downscaling.py | 2 + 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py index ac702b8a22..31f9f58be8 100644 --- a/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py +++ b/lib/improver/tests/test_wind_downscaling_roughnesscorrection.py @@ -30,8 +30,8 @@ # POSSIBILITY OF SUCH DAMAGE. 
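
The decay described in the height_corr_sub docstring above can be made concrete with assumed values (the wavenumber from the earlier sketch, a 20 m orography offset and a 10 m s-1 outer velocity):

    import numpy as np

    wavenum, h_at0, u_href = 0.0222, 20.0, 10.0
    heights = np.array([10., 100., 500.])
    hc_add = np.exp(-wavenum * heights) * u_href * h_at0 * wavenum
    # ~3.6 m s-1 at 10 m, ~0.5 m s-1 at 100 m, ~7e-5 m s-1 at 500 m
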
"""Unit tests for plugin wind_downscaling.RoughnessCorrection.""" -import unittest +import unittest from cf_units import Unit import iris @@ -125,18 +125,18 @@ def __init__(self, nx_ny=3, AoS=None, Sigma=None, z_0=0.2, pporog=None, Parameters ---------- - nxny: a scalar or an np.array([x,y]) - Sets dimension for tests. - AoS: float or 1D or 2D array - Silhouette roughness field - Sigma: float or 1D or 2D array - Standard deviation field of height in grid cell - z_0: float or 1D or 2D array - Vegetative roughness field - pporog: float or 1D or 2D array - Unsmoothed orography field on post-processing grid - modelorog: float or 1D or 2D array - Model orography field on post-processing grid + nxny: a scalar or an np.array([x,y]) + Sets dimension for tests. + AoS: float or 1D or 2D array + Silhouette roughness field + Sigma: float or 1D or 2D array + Standard deviation field of height in grid cell + z_0: float or 1D or 2D array + Vegetative roughness field + pporog: float or 1D or 2D array + Unsmoothed orography field on post-processing grid + modelorog: float or 1D or 2D array + Model orography field on post-processing grid """ if isinstance(nx_ny, int): @@ -257,19 +257,18 @@ def __init__(self, AoS=0.2, Sigma=20.0, z_0=0.2, pporog=250., Parameters ---------- - - AoS: float - Silhouette roughness field - Sigma: float - Standard deviation field of height in grid cell - z_0: float - Vegetative roughness field - pporog: float - Unsmoothed orography on post-processing grid - modelorog: float - Model orography on post-processing grid - heightlevels: 1D np.array - Height level array + AoS: float + Silhouette roughness field + Sigma: float + Standard deviation field of height in grid cell + z_0: float + Vegetative roughness field + pporog: float + Unsmoothed orography on post-processing grid + modelorog: float + Model orography on post-processing grid + heightlevels: 1D np.array + Height level array """ self.w_cube = None @@ -297,10 +296,10 @@ def test_hc_rc(self, wind, height=None): Parameters ---------- - wind: 1 or 2D array - Array of wind speeds - height: float, default None - Value for height in metres for zeroth slice of wind + wind: 1 or 2D array + Array of wind speeds + height: float, default None + Value for height in metres for zeroth slice of wind """ wind = np.array(wind) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index 53de10fc15..bcca68597d 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -30,6 +30,7 @@ # POSSIBILITY OF SUCH DAMAGE. 
"""Module containing wind downscaling plugins.""" + import copy import itertools @@ -39,6 +40,7 @@ from improver.constants import RMDI + # Scale parameter to determine reference height ABSOLUTE_CORRECTION_TOL = 0.04 From 3d72855a340751e70491b352f9cb83bc9058748b Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Wed, 19 Apr 2017 20:52:20 +0100 Subject: [PATCH 0020/1367] Fix pep8 and pylint numpy skip --- lib/improver/wind_downscaling.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index bcca68597d..86f23d05ff 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -355,8 +355,9 @@ def roughness_correction_sub(self, hgrid, uold, mask): cond = hgrid < (self.h_ref).reshape(self.h_ref.shape+(1,)) unew[cond] = ( ustar.reshape(ustar.shape+(1,))*np.ones(unew.shape) - )[cond]*(np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) - * np.ones(unew.shape)))[cond])/VONKARMAN + )[cond] * ( + np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) * + np.ones(unew.shape)))[cond])/VONKARMAN return unew def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): @@ -392,11 +393,11 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): h_in, 0.0), axis=2) if h_in.ndim == 3: - hup = h_in.take(upidx.flatten()+np.arange(0, upidx.size - * h_in.shape[2], + hup = h_in.take(upidx.flatten()+np.arange(0, upidx.size * + h_in.shape[2], h_in.shape[2])) - hlow = h_in.take(loidx.flatten()+np.arange(0, loidx.size - * h_in.shape[2], + hlow = h_in.take(loidx.flatten()+np.arange(0, loidx.size * + h_in.shape[2], h_in.shape[2])) elif h_in.ndim == 1: hup = h_in[upidx].flatten() @@ -445,8 +446,8 @@ def interp1d(xup, xlow, at_x, yup, ylow): diffs = (xup - xlow) interp[diffs != 0] = ( ylow[diffs != 0]+((at_x[diffs != 0]-xlow[diffs != 0]) / - diffs[diffs != 0]*(yup[diffs != 0] - - ylow[diffs != 0]))) + diffs[diffs != 0]*(yup[diffs != 0] - + ylow[diffs != 0]))) interp[diffs == 0] = at_x[diffs == 0]/xup[diffs == 0]*(yup[diffs == 0]) return interp From 2f718b7a1ee8984d7f1ba0d2face00418ee11fea Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Thu, 4 May 2017 09:09:44 +0100 Subject: [PATCH 0021/1367] First round of review feedback --- lib/improver/wind_downscaling.py | 195 +++++++++++++++++-------------- 1 file changed, 109 insertions(+), 86 deletions(-) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index 86f23d05ff..24aa59f278 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -92,16 +92,26 @@ def __init__(self, u_href, h_ref, z_0, mask): def calc_ustar(self): """Function to calculate the friction velocity. + ustar = K * u_href / ln(h_ref / z_0) + + where ustar is the friction velocity, K is Von Karman's + constant, u_ref is the wind speed at the reference height, + h_ref is the reference height and z_0 is the vegetative + roughness length. 
+ Returns: -------- ustar: 2D array (float) friction velocity """ - ustar = np.ones(self.u_href.shape) * RMDI - ustar[self.mask] = VONKARMAN * (self.u_href[self.mask]/np.log - (self.h_ref[self.mask] / - self.z_0[self.mask])) + ustar = np.full(self.u_href.shape, RMDI) + ustar[self.mask] = ( + VONKARMAN * ( + self.u_href[self.mask] / + np.log(self.h_ref[self.mask] / self.z_0[self.mask]) + ) + ) return ustar @@ -111,7 +121,8 @@ class RoughnessCorrectionUtilities(object): This holds functions to calculate the roughness and height corrections given the ancil files: - * standard deviation of hight in grid cell as sigma (model grid on pp grid) + * standard deviation of height in grid cell as sigma (model grid on + pp grid) * Silhouette roughness as a_over_s (model grid on pp grid) * vegetative roughness length z_0 (model grid on pp grid) * post-processing grid orography pporo @@ -132,7 +143,9 @@ def __init__(self, a_over_s, sigma, z_0, pporo, modoro, ppres, modres): ---------- a_over_s: 2D array (float) Silhouette roughness field, dimensionless ancillary data, - calculated according to Robinson (2008) + calculated according to Robinson, D. (2008) - Ancillary + file creation for the UM, Unified Model Documentation Paper + 73. sigma: 2D array (float) Standard deviation field of height in the grid cell, units of length @@ -150,26 +163,22 @@ def __init__(self, a_over_s, sigma, z_0, pporo, modoro, ppres, modres): """ self.a_over_s = a_over_s self.z_0 = z_0 - if z_0 is None: - self.l_no_winddownscale = True - else: - self.l_no_winddownscale = False self.pporo = pporo self.modoro = modoro self.h_over_2 = self.sigma2hover2(sigma) # half peak to trough height - self.hcmask, self.rcmask = self.setmask() # HC mask, RC mask - if not self.l_no_winddownscale: + self.hcmask, self.rcmask = self._setmask() # HC mask, RC mask + if self.z_0 is not None: self.z_0[z_0 <= 0] = Z0M_SEA self.dx_min = ppres/2. # scales smaller than this not resolved in pp # the original code had hardcoded 500 self.dx_max = 3.*modres # scales larger than this resolved in model # the original code had hardcoded 4000 - self.wavenum = self.calc_wav() # k = 2*pi / L - self.h_ref = self.calc_h_ref() - self.refinemask() # HC mask needs to be updated for missing orography - self.h_at0 = self.delta_height() # pp orography - model orography + self.wavenum = self._calc_wav() # k = 2*pi / L + self.h_ref = self._calc_h_ref() + self._refinemask() # HC mask needs to be updated for missing orography + self.h_at0 = self._delta_height() # pp orography - model orography - def refinemask(self): + def _refinemask(self): """Remask over RMDI and NaN orography. The mask for HC needs to be False where either of the @@ -184,7 +193,7 @@ def refinemask(self): self.hcmask[np.isnan(self.pporo)] = False self.hcmask[np.isnan(self.modoro)] = False - def setmask(self): + def _setmask(self): """Create a ~land-sea mask. Create a mask that is basically a land-sea mask: @@ -206,7 +215,7 @@ def setmask(self): hcmask[np.isnan(self.h_over_2)] = False hcmask[np.isnan(self.a_over_s)] = False rcmask = np.copy(hcmask) - if not self.l_no_winddownscale: + if self.z_0 is not None: rcmask[self.z_0 <= 0] = False rcmask[np.isnan(self.z_0)] = False return hcmask, rcmask @@ -239,11 +248,11 @@ def sigma2hover2(sigma): RMDI. """ - h_o_2 = np.ones(sigma.shape) * RMDI + h_o_2 = np.full(sigma.shape, RMDI) h_o_2[sigma > 0] = sigma[sigma > 0] * np.sqrt(2.0) return h_o_2 - def calc_wav(self): + def _calc_wav(self): """Calculate wavenumber k of typical orographic lengthscale. 
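
The np.full(shape, RMDI) construction used throughout this patch is behaviour-preserving relative to the np.ones(shape) * RMDI it replaces; np.full infers a float array from the float fill value, as a quick check confirms:

    import numpy as np

    RMDI = -32767.0
    shape = (3, 3)
    assert (np.full(shape, RMDI) == np.ones(shape) * RMDI).all()
    assert np.full(shape, RMDI).dtype == np.float64
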
Function to calculate wavenumber k of typical orographic @@ -276,15 +285,17 @@ def calc_wav(self): wavenumber in units of inverse units of supplied h_over_2. """ - wavn = np.ones(self.a_over_s.shape) * RMDI - wavn[self.hcmask] = (self.a_over_s[self.hcmask] / - self.h_over_2[self.hcmask]*np.pi) + wavn = np.full(self.a_over_s.shape, RMDI) + wavn[self.hcmask] = ( + (self.a_over_s[self.hcmask] * np.pi) / + self.h_over_2[self.hcmask] + ) wavn[wavn > np.pi/self.dx_min] = np.pi/self.dx_min wavn[self.h_over_2 == 0] = RMDI wavn[abs(wavn) < np.pi/self.dx_max] = np.pi/self.dx_max return wavn - def calc_h_ref(self): + def _calc_h_ref(self): """Calculate the reference height for roughness correction. The reference height below which the flow is in equilibrium @@ -296,6 +307,9 @@ def calc_h_ref(self): epsilon (ABSOLUTE_CORRECTION_TOL). The factor alpha implements eq. 1.3 in Clark (2009): UK Climatology - Wind Screening Tool. See also Vosper (2009) for a motivation. + For a freely available external reference, see the Virtual Met + Mast Version 1 Methodology and Verification paper under + www.thecrownestate.co.uk. alpha is the log of scale parameter to determine reference height which is currently set to 0.04 (this corresponds to @@ -308,20 +322,23 @@ def calc_h_ref(self): """ alpha = -np.log(ABSOLUTE_CORRECTION_TOL) - aparam = np.ones(self.wavenum.shape) * RMDI - h_ref = np.ones(self.wavenum.shape) * RMDI - aparam[self.hcmask] = alpha + np.log(self.wavenum[self.hcmask] * - self.h_over_2[self.hcmask]) - aparam[aparam > 1.0] = 1.0 - aparam[aparam < 0.0] = 0.0 - h_ref[self.hcmask] = aparam[self.hcmask] / self.wavenum[self.hcmask] + tunable_param = np.full(self.wavenum.shape, RMDI) + h_ref = np.full(self.wavenum.shape, RMDI) + tunable_param[self.hcmask] = ( + alpha + np.log(self.wavenum[self.hcmask] * + self.h_over_2[self.hcmask]) + ) + tunable_param[tunable_param > 1.0] = 1.0 + tunable_param[tunable_param < 0.0] = 0.0 + h_ref[self.hcmask] = ( + tunable_param[self.hcmask] / self.wavenum[self.hcmask]) h_ref[h_ref < 1.0] = 1.0 - h_ref = np.minimum(h_ref, HREF_SCALE*self.h_over_2) + h_ref = np.minimum(h_ref, HREF_SCALE * self.h_over_2) h_ref[h_ref < 1.0] = 1.0 h_ref[~self.hcmask] = 0.0 return h_ref - def roughness_correction_sub(self, hgrid, uold, mask): + def calc_roughness_correction(self, hgrid, uold, mask): """Function to perform the roughness correction. 
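
The two clips at the end of _calc_wav bound the wavenumber by the scales either grid can represent. With assumed grid lengths of 2 km for both the post-processing and model grids, the admissible band works out as:

    import numpy as np

    ppres = modres = 2000.0                  # assumed grid lengths in metres
    dx_min, dx_max = ppres / 2., 3. * modres
    k_max = np.pi / dx_min                   # sharpest scale kept by the pp grid
    k_min = np.pi / dx_max                   # smoothest scale the model resolves
    wavn = np.clip(0.5, k_min, k_max)        # an over-sharp k is pulled to k_max
    assert wavn == k_max
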
Parameters: @@ -339,28 +356,28 @@ def roughness_correction_sub(self, hgrid, uold, mask): Comments: Replace the windspeed profile below the reference height with one - that increases logarithmic with height, bound by the original + that increases logarithmically with height, bound by the original velocity uhref at the reference height h_ref and by a 0 velocity at the vegetative roughness height z_0 """ - uhref = self.calc_u_at_h(uold, hgrid, self.h_ref, mask) + uhref = self._calc_u_at_h(uold, hgrid, self.h_ref, mask) if hgrid.ndim == 1: - hgrid = hgrid.reshape((1, 1, )+(hgrid.shape[0],)) + hgrid = hgrid[np.newaxis, np.newaxis, :] ustar = FrictionVelocity(uhref, self.h_ref, self.z_0, mask).calc_ustar() unew = np.copy(uold) mhref = self.h_ref mhref[~mask] = RMDI - cond = hgrid < (self.h_ref).reshape(self.h_ref.shape+(1,)) + cond = hgrid < self.h_ref[:, np.newaxis] unew[cond] = ( - ustar.reshape(ustar.shape+(1,))*np.ones(unew.shape) + ustar[:, np.newaxis]*np.ones(unew.shape) )[cond] * ( np.log(hgrid/(np.reshape(self.z_0, self.z_0.shape + (1,)) * np.ones(unew.shape)))[cond])/VONKARMAN return unew - def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): + def _calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): """Function to interpolate u_in on h_in at hhere. Parameters: @@ -371,7 +388,9 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): height layer array hhere: 2D array (float) height grid to interpolate at - (dolog: scalar (logial) + mask: 2D array (logical) + mask the final result for uath + (dolog: scalar (logical) if True, log interpolation, default False) Returns: @@ -387,9 +406,9 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): # Ignores the height at the position where u_in is RMDI,"hops over" hhere = np.ma.masked_less(hhere, 0.0) - upidx = np.argmax(h_in > hhere.reshape(hhere.shape+(1,)), axis=2) + upidx = np.argmax(h_in > hhere[:, np.newaxis], axis=2) # loidx = np.maximum(upidx-1, 0) #if RMDI, need below - loidx = np.argmin(np.ma.masked_less(hhere.reshape(hhere.shape+(1,)) - + loidx = np.argmin(np.ma.masked_less(hhere[:, np.newaxis] - h_in, 0.0), axis=2) if h_in.ndim == 3: @@ -409,26 +428,26 @@ def calc_u_at_h(self, u_in, h_in, hhere, mask, dolog=False): mask = mask.flatten() uath = np.full(mask.shape, RMDI, dtype=float) if dolog: - uath[mask] = self.loginterpol(hup[mask], hlow[mask], - hhere.flatten()[mask], - uup[mask], ulow[mask]) + uath[mask] = self._interpolate_log(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) else: - uath[mask] = self.interp1d(hup[mask], hlow[mask], - hhere.flatten()[mask], - uup[mask], ulow[mask]) + uath[mask] = self._interpolate_1d(hup[mask], hlow[mask], + hhere.flatten()[mask], + uup[mask], ulow[mask]) uath = np.reshape(uath, hhere.shape) return uath @staticmethod - def interp1d(xup, xlow, at_x, yup, ylow): + def _interpolate_1d(xup, xlow, at_x, yup, ylow): """Simple 1D linear interpolation for 2D grid inputs level. Parameters: ---------- - xlow: 2D np.array (float) - lower x-bins xup: 2D np.array (float) upper x-bins + xlow: 2D np.array (float) + lower x-bins at_x: 2D np.array (float) x values to interpolate y at yup: 2D np.array(float) @@ -452,42 +471,42 @@ def interp1d(xup, xlow, at_x, yup, ylow): return interp @staticmethod - def loginterpol(x_u, x_l, at_x, y_u, y_l): + def _interpolate_log(xup, xlow, at_x, yup, ylow): """Simple 1D log interpolation y(x), except if lowest layer is ground level. 
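Numerically, this log fit passes y = a*ln(x) + b through the two bracketing levels and evaluates it at at_x. A standalone sketch with illustrative values (not the plugin's API):

import numpy as np

xup, xlow = 100.0, 10.0   # bracketing level heights (m)
yup, ylow = 10.0, 6.0     # wind speeds at those heights (m/s)
at_x = 50.0               # height to interpolate to (m)

ain = (yup - ylow) / np.log(xup / xlow)   # slope against log(x)
y_at_x = ain * np.log(at_x / xup) + yup
print(round(y_at_x, 2))  # 8.8 m/s, between the two bounding values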
Parameters: ---------- - x_l: 2D np.array (float) - lower x-bins - x_u: 2D np.array (float) + xup: 2D np.array (float) upper x-bins + xlow: 2D np.array (float) + lower x-bins at_x: 2D np.array (float) x values to interpolate y at - y_u: 2D np.array (float) - y(x_u) - y_l: 2D np.array (float) - y(x_l) + yup: 2D np.array(float) + y(xup) + ylow: 2D np.array (float) + y(xlow) Returns: ------- loginterp: 2D np.array (float) - y(at_x) assuming a log function between x_l and x_u + y(at_x) assuming a log function between xlow and xup """ - ain = np.full(x_u.shape, RMDI, dtype=float) - loginterp = np.full(x_u.shape, RMDI, dtype=float) - mfrac = x_u/x_l - mtest = (x_u/x_l != 1) & (at_x != x_u) - ain[mtest] = (y_u[mtest] - y_l[mtest])/np.log(mfrac[mtest]) - loginterp[mtest] = ain[mtest]*np.log(at_x[mtest]/x_u[mtest])+y_u[mtest] - mtest = (x_u/x_l == 1) # below lowest layer, make lin interp - loginterp[mtest] = at_x[mtest]/x_u[mtest] * (y_u[mtest]) - mtest = (at_x == x_u) # just use y_u - loginterp[mtest] = y_u[mtest] + ain = np.full(xup.shape, RMDI, dtype=float) + loginterp = np.full(xup.shape, RMDI, dtype=float) + mfrac = xup/xlow + mtest = (xup/xlow != 1) & (at_x != xup) + ain[mtest] = (yup[mtest] - ylow[mtest])/np.log(mfrac[mtest]) + loginterp[mtest] = ain[mtest]*np.log(at_x[mtest]/xup[mtest])+yup[mtest] + mtest = (xup/xlow == 1) # below lowest layer, make lin interp + loginterp[mtest] = at_x[mtest]/xup[mtest] * (yup[mtest]) + mtest = (at_x == xup) # just use yup + loginterp[mtest] = yup[mtest] return loginterp - def height_corr_sub(self, u_a, heightg, mask, onemfrac): + def _calc_height_corr(self, u_a, heightg, mask, onemfrac): """Function to calculate the additive height correction. Parameters: @@ -496,6 +515,8 @@ def height_corr_sub(self, u_a, heightg, mask, onemfrac): outer velocity, e.g. velocity at h_ref_orig heightg: 1D or 3D array heights above orography + mask: 3D array(logical) + Masks the hc_add result onemfrac: currently, scalar = 1. In principle, it is a function of position and height, e.g. a 3D array (float) @@ -511,30 +532,32 @@ def height_corr_sub(self, u_a, heightg, mask, onemfrac): offset h_at0 (the higher the unresolved hill), the larger is the disturbance. - The more smooth the distrubance (the larger the horizontal + The more smooth the disturbance (the larger the horizontal scale of the disturbance), the smaller the height correction (hence, a larger wavenumber results in a larger disturbance). - hc_add = exp(-height*wavenumber)*u(href)*h_at_0*k + hc_add = exp(-height*wavenumber)*u(href)*h_at_0*wavenumber + + A final factor of 1 is assumed and omitted for the Bessel + function term. """ (xdim, ydim) = u_a.shape if heightg.ndim == 1: zdim = heightg.shape[0] - heightg = heightg.reshape((1, 1, zdim)) + heightg = heightg[np.newaxis, np.newaxis, :] elif heightg.ndim == 3: zdim = heightg.shape[2] ml2 = self.h_at0*self.wavenum expon = np.ones([xdim, ydim, zdim]) - mult = (self.wavenum).reshape(self.wavenum.shape+(1,))*heightg + mult = self.wavenum[:, np.newaxis]*heightg expon[mult > 0.0001] = np.exp(-mult[mult > 0.0001]) hc_add = ( - expon*u_a.reshape(u_a.shape+(1,)) * - ml2.reshape(ml2.shape+(1,))*onemfrac) + expon*u_a[:, np.newaxis] * ml2[:, np.newaxis] * onemfrac) hc_add[~mask, :] = 0 return hc_add - def delta_height(self): + def _delta_height(self): """Function to calculate pp-grid diff from model grid. 
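Plugging illustrative numbers into the height-correction formula quoted above, hc_add = exp(-height*wavenumber) * u(href) * h_at_0 * wavenumber. A standalone sketch, not the plugin's API:

import numpy as np

wavenum = 2.0 * np.pi / 4000.0   # k = 2*pi/L for a 4 km lengthscale (1/m)
u_href = 10.0                    # flow speed at the reference height (m/s)
h_at_0 = 60.0                    # pp orography minus model orography (m)
height = 100.0                   # height above the surface (m)

hc_add = np.exp(-height * wavenum) * u_href * h_at_0 * wavenum
print(round(hc_add, 2))  # ~0.81 m/s added; decays as height increases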
Calculate the difference between pp-grid height and model
@@ -542,7 +565,7 @@ def delta_height(self):
 
         Returns:
         -------
-        deltZ: 2D np.array (float)
+        delt_z: 2D np.array (float)
             height difference, ppgrid-model
 
         """
@@ -550,7 +573,7 @@
         delt_z[self.hcmask] = self.pporo[self.hcmask]-self.modoro[self.hcmask]
         return delt_z
 
-    def do_rc_hc_all(self, hgrid, uorig):
+    def _do_rc_hc_all(self, hgrid, uorig):
         """Function to call HC and RC (height and roughness corrections).
 
         Parameters:
@@ -576,14 +599,14 @@
         mask_hc = np.copy(self.hcmask)
         mask_hc[(uorig == RMDI).any(axis=2)] = False
         if not self.l_no_winddownscale:
-            unew = self.roughness_correction_sub(hgrid, uorig, mask_rc)
+            unew = self.calc_roughness_correction(hgrid, uorig, mask_rc)
         else:
            unew = uorig
-        uhref_orig = self.calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc)
+        uhref_orig = self._calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc)
         mask_hc[uhref_orig <= 0] = False
         onemfrac = 1.0
         # onemfrac = 1.0 - BfuncFrac(nx,ny,nz,heightvec,z_0,waveno, Ustar, UI)
-        hc_add = self.height_corr_sub(uhref_orig, hgrid, mask_hc, onemfrac)
+        hc_add = self._calc_height_corr(uhref_orig, hgrid, mask_hc, onemfrac)
         result = unew + hc_add
         result[result < 0.] = 0  # HC can be negative if pporo < modoro
         return result

Date: Thu, 4 May 2017 16:27:38 +0100
Subject: [PATCH 0022/1367] Fix l_no_downscale

---
 lib/improver/wind_downscaling.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py
index 24aa59f278..e4802e584b 100644
--- a/lib/improver/wind_downscaling.py
+++ b/lib/improver/wind_downscaling.py
@@ -598,7 +598,7 @@ def _do_rc_hc_all(self, hgrid, uorig):
         mask_rc[(uorig == RMDI).any(axis=2)] = False
         mask_hc = np.copy(self.hcmask)
         mask_hc[(uorig == RMDI).any(axis=2)] = False
-        if not self.l_no_winddownscale:
+        if self.z_0 is not None:
             unew = self.calc_roughness_correction(hgrid, uorig, mask_rc)
         else:
             unew = uorig

From d1fe3e0f0776dc4984dfdaec22dbeb41ad76f890 Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Fri, 19 May 2017 12:52:18 +0100
Subject: [PATCH 0023/1367] Edit to simplify find_coord_order method.
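The simplification relies on iris returning the dimensions spanned by a coordinate as a tuple, so the first element can be taken directly, as the project's own call mcube.coord_dims(coord_name) shows. A minimal sketch with a hypothetical two-dimensional cube (illustrative only, not part of the change):

import numpy as np
import iris
from iris.coords import DimCoord

cube = iris.cube.Cube(np.zeros((2, 3)))
cube.add_dim_coord(DimCoord([0.0, 1.0], long_name='y'), 0)
cube.add_dim_coord(DimCoord([0.0, 1.0, 2.0], long_name='x'), 1)
print(cube.coord_dims('x'))  # (1,) - a tuple, hence the trailing [0]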
--- lib/improver/wind_downscaling.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index 87e51d2df8..a1d577f38f 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -848,9 +848,7 @@ def find_coord_order(self, mcube): positions = [np.nan, np.nan, np.nan, np.nan] for coord_index, coord_name in enumerate(coord_names): if mcube.coords(coord_name, dim_coords=True): - coord_dimension = mcube.coord_dims(coord_name) - coord_dimension = coord_dimension[0] - positions[coord_index] = coord_dimension + positions[coord_index] = mcube.coord_dims(coord_name)[0] return positions def find_heightgrid(self, wind): From b733f4e1d6c09f1c050ab63160f93e5d21222220 Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Fri, 19 May 2017 12:59:56 +0100 Subject: [PATCH 0024/1367] Add help test --- tests/improver-nbhood/01-help.bats | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 tests/improver-nbhood/01-help.bats diff --git a/tests/improver-nbhood/01-help.bats b/tests/improver-nbhood/01-help.bats new file mode 100644 index 0000000000..26bfb62744 --- /dev/null +++ b/tests/improver-nbhood/01-help.bats @@ -0,0 +1,22 @@ +#!/usr/bin/env bats + +@test "nbhood -h" { + run improver nbhood -h + [[ "$status" -eq 0 ]] + read -d '' expected <<'__HELP__' || true +usage: improver-nbhood [-h] [--radius-in-km RADIUS] INPUT_FILE OUTPUT_FILE + +Apply basic weighted circle smoothing via the BasicNeighbourhoodProcessing +plugin to a file with one cube. + +positional arguments: + INPUT_FILE A path to an input NetCDF file to be processed + OUTPUT_FILE The output path for the processed NetCDF + +optional arguments: + -h, --help show this help message and exit + --radius-in-km RADIUS + The kernel radius for neighbourhood processing +__HELP__ + [[ "$output" == "$expected" ]] +} From ae5c9243e8b220008d0e962be0b6d3b53dda32b6 Mon Sep 17 00:00:00 2001 From: gavinevans Date: Fri, 19 May 2017 13:17:28 +0100 Subject: [PATCH 0025/1367] Remove whitespace. --- lib/improver/wind_downscaling.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py index 0bc5d7fd76..fd1f15ee4a 100644 --- a/lib/improver/wind_downscaling.py +++ b/lib/improver/wind_downscaling.py @@ -608,13 +608,11 @@ def do_rc_hc_all(self, hgrid, uorig): unew = uorig uhref_orig = self._calc_u_at_h(uorig, hgrid, 1.0/self.wavenum, mask_hc) mask_hc[uhref_orig <= 0] = False - # Setting this value to 1, is equivalent to setting the # Bessel function to 1. (Friedrich, 2016) # Example usage if the Bessel function was not set to 1 is: # onemfrac = 1.0 - BfuncFrac(nx,ny,nz,heightvec,z_0,waveno, Ustar, UI) onemfrac = 1.0 - hc_add = self._calc_height_corr(uhref_orig, hgrid, mask_hc, onemfrac) result = unew + hc_add result[result < 0.] 
= 0  # HC can be negative if pporo < modoro
         return result

From: Ben Fitzpatrick
Date: Mon, 22 May 2017 08:57:10 +0100
Subject: [PATCH 0026/1367] Add pylintrc symlink for Codacy

---
 .pylintrc | 1 +
 1 file changed, 1 insertion(+)
 create mode 120000 .pylintrc

diff --git a/.pylintrc b/.pylintrc
new file mode 120000
index 0000000000..41cc413ce4
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1 @@
+etc/pylintrc
\ No newline at end of file

From fd5ec896c3c386ede002f5308d6483d9b234d675 Mon Sep 17 00:00:00 2001
From: Caroline Jones
Date: Tue, 23 May 2017 13:10:19 +0100
Subject: [PATCH 0027/1367] Adding unit tests and revised version of the
 weighted blending plugin

---
 ...est_weighted_blend_BasicWeightedAverage.py | 83 ++++++++++++++++++-
 lib/improver/weighted_blend.py                | 64 +++++++++-----
 2 files changed, 121 insertions(+), 26 deletions(-)

diff --git a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py
index 9aef1d7cf3..3cab309567 100644
--- a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py
+++ b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py
@@ -34,6 +34,7 @@
 import unittest
 
 from cf_units import Unit
+import iris
 from iris.coords import AuxCoord, DimCoord
 from iris.cube import Cube
 from iris.tests import IrisTest
@@ -49,8 +50,8 @@ class TestBasicWeightedAverage(IrisTest):
     def setUp(self):
         """Create a cube with a single non-zero point."""
         data = np.zeros((2, 5, 5))
-        data[0][:][:] = 0.0
-        data[1][:][:] = 1.0
+        data[0][:][:] = 1.0
+        data[1][:][:] = 2.0
         cube = Cube(data, standard_name="precipitation_amount",
                     units="kg m^-2 s^-1")
         cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, 5), 'latitude',
@@ -60,9 +61,15 @@
         time_origin = "hours since 1970-01-01 00:00:00"
         calendar = "gregorian"
         tunit = Unit(time_origin, calendar)
-        cube.add_aux_coord(AuxCoord([402192.5,402193.5],
+        cube.add_aux_coord(AuxCoord([402192.5, 402193.5],
                                     "time", units=tunit), 0)
         self.cube = cube
+        new_scalar_coord = iris.coords.AuxCoord(1,
+                                                long_name='dummy_scalar_coord',
+                                                units='no_unit')
+        cube_with_scalar = cube.copy()
+        cube_with_scalar.add_aux_coord(new_scalar_coord)
+        self.cube_with_scalar = cube_with_scalar
 
     def test_basic(self):
         """Test that the plugin returns an iris.cube.Cube."""
@@ -71,6 +78,76 @@
         result = plugin.process(self.cube)
         self.assertIsInstance(result, Cube)
 
+    def test_fails_coord_not_in_cube(self):
+        """Test it Raises a Value Error if coord not in the cube"""
+        coord = "notset"
+        plugin = BasicWeightedAverage(coord)
+        msg = ('the coord for this plugin must be ' +
+               'an existing coordinate in the input cube')
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(self.cube)
+
+    def test_fails_input_not_a_cube(self):
+        """Test it Raises a Value Error if not supplied with a cube"""
+        coord = "time"
+        plugin = BasicWeightedAverage(coord)
+        notacube = 0.0
+        msg = ('the first argument must be an instance of ' +
+               'iris.cube.Cube')
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(notacube)
+
+    def test_fails_weights_shape(self):
+        """Test it Raises a Value Error if weights shape does not match
+           coord shape"""
+        coord = "time"
+        plugin = BasicWeightedAverage(coord)
+        weights = [0.1, 0.2, 0.7]
+        msg = ('the weights array must match the shape ' +
+               'of the coordinate in the input cube')
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(self.cube, weights)
+
+    def test_coord_adjust_set(self):
+        """Test it works with coord adjust set"""
+        coord = "time"
+        coord_adjust = lambda pnts:
pnts[len(pnts)-1] + plugin = BasicWeightedAverage(coord, coord_adjust) + result = plugin.process(self.cube) + self.assertAlmostEquals(result.coord(coord).points[0], 402193.5) + + def test_scalar_coord(self): + """Test it works on scalar coord""" + coord = "dummy_scalar_coord" + plugin = BasicWeightedAverage(coord) + weights = np.array([1.0]) + result = plugin.process(self.cube_with_scalar, weights) + self.assertAlmostEquals(result.data[0, 0, 0], 1.0) + + def test_weights_equal_none(self): + """Test it works with weights set to None""" + coord = "time" + plugin = BasicWeightedAverage(coord) + weights = None + result = plugin.process(self.cube, weights) + self.assertAlmostEquals(result.data[0, 0], 1.5) + + def test_weights_equal_list(self): + """Test it work with weights set to list [0.2, 0.8]""" + coord = "time" + plugin = BasicWeightedAverage(coord) + weights = [0.2, 0.8] + result = plugin.process(self.cube, weights) + self.assertAlmostEquals(result.data[0, 0], 1.8) + + def test_weights_equal_array(self): + """Test it works with weights set to array (0.8, 0.2)""" + coord = "time" + plugin = BasicWeightedAverage(coord) + weights = np.array([0.8, 0.2]) + result = plugin.process(self.cube, weights) + self.assertAlmostEquals(result.data[0, 0], 1.2) + if __name__ == '__main__': unittest.main() diff --git a/lib/improver/weighted_blend.py b/lib/improver/weighted_blend.py index 5c5fe23db8..f93161cb0f 100644 --- a/lib/improver/weighted_blend.py +++ b/lib/improver/weighted_blend.py @@ -32,6 +32,7 @@ import iris +import numpy as np class BasicWeightedAverage(object): @@ -40,56 +41,73 @@ class BasicWeightedAverage(object): """ def __init__(self, coord, coord_adjust=None): - """Set up for processing an in-or-out of threshold binary field. - - Parameters - ---------- - - coord : string - The name of a coordinate dimension in the cube - - coord_adjust : - + """Set up for a Basic Weighted Average Blending plugin + + Args: + coord : string + The name/s of a coordinate dimension/s in the cube + + coord_adjust : Function to apply to the coordinate after + collapsing the cube to correct the values + for example for time windowing and + cycle averaging the follow function would + adjust the time coordinates + e.g. coord_adjust = lambda pnts: pnts[len(pnts)/2] """ self.coord = coord self.coord_adjust = coord_adjust - def __str__(self): + def __repr__(self): """Represent the configured plugin instance as a string.""" return ( - '').format(self.coord) + '').format(self.coord) def process(self, cube, weights=None): - """Convert each point to a fuzzy truth value based on threshold. + """Calculated weighted mean across the chosen coord - Parameters - ---------- + Args: + cube : iris.cube.Cube + Cube to blend across the coord. - cube : iris.cube.Cube - Cube to blend across the coord. + weights: Optional list or np.array of weights + or None (equivalent to equal weights) - weights: array of weights + Returns: + result : iris.cube.Cube """ if not isinstance(cube, iris.cube.Cube): raise ValueError('the first argument must be an instance of ' + 'iris.cube.Cube') if not cube.coords(self.coord): - raise ValueError('the second argument must be ' + + raise ValueError('the coord for this plugin must be ' + 'an existing coordinate in the input cube') + # Find the coords dimension. 
+ # If coord is a scalar_coord try adding it collapse_dim = cube.coord_dims(self.coord) if not collapse_dim: + print 'Warning: Could not find collapse dimension ' + \ + 'will try adding it' cube = iris.util.new_axis(cube, self.coord) collapse_dim = cube.coord_dims(self.coord) + # supply weights as an array of weights whose shape matches the cube + weights_array = None if weights is not None: - weights = iris.util.broadcast_to_shape(np.array(weights), - cube.shape, collapse_dim) - result = cube.collapsed(coord, iris.analysis.MEAN, weights=weights) + if np.array(weights).shape != cube.coord(self.coord).points.shape: + raise ValueError('the weights array must match the shape ' + + 'of the coordinate in the input cube') + weights_array = iris.util.broadcast_to_shape(np.array(weights), + cube.shape, + collapse_dim) + # Calculate the weighted average + result = cube.collapsed(self.coord, + iris.analysis.MEAN, weights=weights_array) + # if set adjust values of collapsed coordinates if self.coord_adjust is not None: - # adjust values of collapsed coordinates for crd in result.coords(): if cube.coord_dims(crd.name()) == collapse_dim: pnts = cube.coord(crd.name()).points crd.points = np.array(self.coord_adjust(pnts), dtype=crd.points.dtype) + return result From 00b2ae1edc3e7e9d06538ac600caebc18caa783e Mon Sep 17 00:00:00 2001 From: Caroline Jones Date: Wed, 24 May 2017 11:14:45 +0100 Subject: [PATCH 0028/1367] Adding changes suggested by Gavin to blending weight plugin --- ...est_weighted_blend_BasicWeightedAverage.py | 36 ++++++++++++------- lib/improver/weighted_blend.py | 24 ++++++------- 2 files changed, 35 insertions(+), 25 deletions(-) diff --git a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py index 3cab309567..de426174f3 100644 --- a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py +++ b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py @@ -32,6 +32,7 @@ import unittest +import warnings from cf_units import Unit import iris @@ -49,14 +50,14 @@ class TestBasicWeightedAverage(IrisTest): def setUp(self): """Create a cube with a single non-zero point.""" - data = np.zeros((2, 5, 5)) + data = np.zeros((2, 2, 2)) data[0][:][:] = 1.0 data[1][:][:] = 2.0 cube = Cube(data, standard_name="precipitation_amount", units="kg m^-2 s^-1") - cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, 5), 'latitude', + cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, 2), 'latitude', units='degrees'), 1) - cube.add_dim_coord(DimCoord(np.linspace(120, 180, 5), 'longitude', + cube.add_dim_coord(DimCoord(np.linspace(120, 180, 2), 'longitude', units='degrees'), 2) time_origin = "hours since 1970-01-01 00:00:00" calendar = "gregorian" @@ -82,7 +83,7 @@ def test_fails_coord_not_in_cube(self): """Test it Raises a Value Error if coord not in the cube""" coord = "notset" plugin = BasicWeightedAverage(coord) - msg = ('the coord for this plugin must be ' + + msg = ('The coord for this plugin must be ' + 'an existing coordinate in the input cube') with self.assertRaisesRegexp(ValueError, msg): plugin.process(self.cube) @@ -92,7 +93,7 @@ def test_fails_input_not_a_cube(self): coord = "time" plugin = BasicWeightedAverage(coord) notacube = 0.0 - msg = ('the first argument must be an instance of ' + + msg = ('The first argument must be an instance of ' + 'iris.cube.Cube') with self.assertRaisesRegexp(ValueError, msg): plugin.process(notacube) @@ -103,7 +104,7 @@ def test_fails_weights_shape(self): coord = "time" plugin = 
BasicWeightedAverage(coord) weights = [0.1, 0.2, 0.7] - msg = ('the weights array must match the shape ' + + msg = ('The weights array must match the shape ' + 'of the coordinate in the input cube') with self.assertRaisesRegexp(ValueError, msg): plugin.process(self.cube, weights) @@ -114,15 +115,23 @@ def test_coord_adjust_set(self): coord_adjust = lambda pnts: pnts[len(pnts)-1] plugin = BasicWeightedAverage(coord, coord_adjust) result = plugin.process(self.cube) - self.assertAlmostEquals(result.coord(coord).points[0], 402193.5) + self.assertAlmostEquals(result.coord(coord).points, [402193.5]) def test_scalar_coord(self): """Test it works on scalar coord""" coord = "dummy_scalar_coord" plugin = BasicWeightedAverage(coord) weights = np.array([1.0]) - result = plugin.process(self.cube_with_scalar, weights) - self.assertAlmostEquals(result.data[0, 0, 0], 1.0) + with warnings.catch_warnings(record=True) as warning_list: + warnings.simplefilter("always") + result = plugin.process(self.cube_with_scalar, weights) + self.assertTrue(any(item.category == UserWarning + for item in warning_list)) + warning_msg = "Could not find collapse dimension" + self.assertTrue(any(warning_msg in str(item) + for item in warning_list)) + print warning_list + self.assertArrayAlmostEqual(result.data, self.cube.data) def test_weights_equal_none(self): """Test it works with weights set to None""" @@ -130,7 +139,8 @@ def test_weights_equal_none(self): plugin = BasicWeightedAverage(coord) weights = None result = plugin.process(self.cube, weights) - self.assertAlmostEquals(result.data[0, 0], 1.5) + expected_result_array = np.ones((2, 2))*1.5 + self.assertArrayAlmostEqual(result.data, expected_result_array) def test_weights_equal_list(self): """Test it work with weights set to list [0.2, 0.8]""" @@ -138,7 +148,8 @@ def test_weights_equal_list(self): plugin = BasicWeightedAverage(coord) weights = [0.2, 0.8] result = plugin.process(self.cube, weights) - self.assertAlmostEquals(result.data[0, 0], 1.8) + expected_result_array = np.ones((2, 2))*1.8 + self.assertArrayAlmostEqual(result.data, expected_result_array) def test_weights_equal_array(self): """Test it works with weights set to array (0.8, 0.2)""" @@ -146,7 +157,8 @@ def test_weights_equal_array(self): plugin = BasicWeightedAverage(coord) weights = np.array([0.8, 0.2]) result = plugin.process(self.cube, weights) - self.assertAlmostEquals(result.data[0, 0], 1.2) + expected_result_array = np.ones((2, 2))*1.2 + self.assertArrayAlmostEqual(result.data, expected_result_array) if __name__ == '__main__': diff --git a/lib/improver/weighted_blend.py b/lib/improver/weighted_blend.py index f93161cb0f..a938950bb7 100644 --- a/lib/improver/weighted_blend.py +++ b/lib/improver/weighted_blend.py @@ -29,16 +29,14 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """Module containing Weighted Blend classes.""" +import warnings - -import iris import numpy as np +import iris class BasicWeightedAverage(object): - """Apply a Basic Weighted Average to a cube. 
- - """ + """Apply a Basic Weighted Average to a cube.""" def __init__(self, coord, coord_adjust=None): """Set up for a Basic Weighted Average Blending plugin @@ -46,7 +44,6 @@ def __init__(self, coord, coord_adjust=None): Args: coord : string The name/s of a coordinate dimension/s in the cube - coord_adjust : Function to apply to the coordinate after collapsing the cube to correct the values for example for time windowing and @@ -68,7 +65,6 @@ def process(self, cube, weights=None): Args: cube : iris.cube.Cube Cube to blend across the coord. - weights: Optional list or np.array of weights or None (equivalent to equal weights) @@ -77,24 +73,26 @@ def process(self, cube, weights=None): """ if not isinstance(cube, iris.cube.Cube): - raise ValueError('the first argument must be an instance of ' + - 'iris.cube.Cube') + raise ValueError('The first argument must be an instance of ' + + 'iris.cube.Cube but is' + + ' {0:s}'.format(type(cube))) if not cube.coords(self.coord): - raise ValueError('the coord for this plugin must be ' + + raise ValueError('The coord for this plugin must be ' + 'an existing coordinate in the input cube') # Find the coords dimension. # If coord is a scalar_coord try adding it collapse_dim = cube.coord_dims(self.coord) if not collapse_dim: - print 'Warning: Could not find collapse dimension ' + \ - 'will try adding it' + msg = ('Could not find collapse dimension, ' + + 'will try adding it') + warnings.warn(msg) cube = iris.util.new_axis(cube, self.coord) collapse_dim = cube.coord_dims(self.coord) # supply weights as an array of weights whose shape matches the cube weights_array = None if weights is not None: if np.array(weights).shape != cube.coord(self.coord).points.shape: - raise ValueError('the weights array must match the shape ' + + raise ValueError('The weights array must match the shape ' + 'of the coordinate in the input cube') weights_array = iris.util.broadcast_to_shape(np.array(weights), cube.shape, From 9bdcc33a90a1523af7004b0fe7ad4c8259501318 Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Wed, 24 May 2017 12:52:21 +0100 Subject: [PATCH 0029/1367] Add help and tests for improver tests --- bin/improver-tests | 17 +++++++++++++++++ tests/improver-tests/00-help.bats | 16 ++++++++++++++++ tests/improver-tests/01-bad-option.bats | 16 ++++++++++++++++ 3 files changed, 49 insertions(+) create mode 100644 tests/improver-tests/00-help.bats create mode 100644 tests/improver-tests/01-bad-option.bats diff --git a/bin/improver-tests b/bin/improver-tests index 046bb1a8e0..ee8f7cdeac 100755 --- a/bin/improver-tests +++ b/bin/improver-tests @@ -18,6 +18,23 @@ function echo_ok { echo -e "\033[1;32m[OK]\033[0m $1" } +if [[ ${1:-} == '--help' ]] || [[ ${1:-} == '-h' ]]; then + cat <<'__USAGE__' +improver tests [--debug] + +Run pep8, pylint, unit and CLI acceptance tests. + +Optional arguments: + --debug Run in verbose mode (may take longer for CLI) + -h, --help Show this message and exit +__USAGE__ + exit 0 +fi +if [[ -n "${1:-}" && ${1:-} != '--debug' ]]; then + improver tests --help + exit 2 +fi + cd $IMPROVER_DIR/lib # PEP8 testing. diff --git a/tests/improver-tests/00-help.bats b/tests/improver-tests/00-help.bats new file mode 100644 index 0000000000..49e1abe5e9 --- /dev/null +++ b/tests/improver-tests/00-help.bats @@ -0,0 +1,16 @@ +#!/usr/bin/env bats + +@test "tests -h" { + run improver tests -h + [[ "$status" -eq 0 ]] + read -d '' expected <<'__HELP__' || true +improver tests [--debug] + +Run pep8, pylint, unit and CLI acceptance tests. 
+ +Optional arguments: + --debug Run in verbose mode (may take longer for CLI) + -h, --help Show this message and exit +__HELP__ + [[ "$output" == "$expected" ]] +} diff --git a/tests/improver-tests/01-bad-option.bats b/tests/improver-tests/01-bad-option.bats new file mode 100644 index 0000000000..d86e827200 --- /dev/null +++ b/tests/improver-tests/01-bad-option.bats @@ -0,0 +1,16 @@ +#!/usr/bin/env bats + +@test "tests bad option" { + run improver tests --silly-option + [[ "$status" -eq 2 ]] + read -d '' expected <<'__HELP__' || true +improver tests [--debug] + +Run pep8, pylint, unit and CLI acceptance tests. + +Optional arguments: + --debug Run in verbose mode (may take longer for CLI) + -h, --help Show this message and exit +__HELP__ + [[ "$output" == "$expected" ]] +} From c2c97618fdab29efcde3d68f404b9f5c172c379e Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Mon, 22 May 2017 08:57:10 +0100 Subject: [PATCH 0030/1367] Add pylintrc symlink for Codacy --- .pylintrc | 1 + 1 file changed, 1 insertion(+) create mode 120000 .pylintrc diff --git a/.pylintrc b/.pylintrc new file mode 120000 index 0000000000..41cc413ce4 --- /dev/null +++ b/.pylintrc @@ -0,0 +1 @@ +etc/pylintrc \ No newline at end of file From 684ec99fe8dd870eed1d34d6426d7d625ffeac8b Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Tue, 23 May 2017 10:34:41 +0100 Subject: [PATCH 0031/1367] Files comprising spotdata routine. Unit test for neighbour finding and data extraction routines. Pylint-ed (except where it disagrees with pep8). Pep8-ed. --- lib/improver/constants.py | 42 ++ lib/improver/spotdata/__init__.py | 8 + lib/improver/spotdata/ancillaries.py | 109 +++ lib/improver/spotdata/common_functions.py | 240 +++++++ lib/improver/spotdata/configurations.py | 156 ++++ lib/improver/spotdata/extract_data.py | 336 +++++++++ lib/improver/spotdata/extrema.py | 112 +++ lib/improver/spotdata/framework.py | 242 +++++++ lib/improver/spotdata/neighbour_finding.py | 445 ++++++++++++ lib/improver/spotdata/read_input.py | 107 +++ lib/improver/spotdata/site_data.py | 174 +++++ .../spotdata/tests/test_extract_data.py | 372 ++++++++++ .../spotdata/tests/test_neighbour_finding.py | 679 ++++++++++++++++++ lib/improver/spotdata/times.py | 74 ++ lib/improver/spotdata/write_output.py | 50 ++ 15 files changed, 3146 insertions(+) create mode 100644 lib/improver/constants.py create mode 100644 lib/improver/spotdata/__init__.py create mode 100644 lib/improver/spotdata/ancillaries.py create mode 100644 lib/improver/spotdata/common_functions.py create mode 100644 lib/improver/spotdata/configurations.py create mode 100644 lib/improver/spotdata/extract_data.py create mode 100644 lib/improver/spotdata/extrema.py create mode 100644 lib/improver/spotdata/framework.py create mode 100644 lib/improver/spotdata/neighbour_finding.py create mode 100644 lib/improver/spotdata/read_input.py create mode 100644 lib/improver/spotdata/site_data.py create mode 100644 lib/improver/spotdata/tests/test_extract_data.py create mode 100644 lib/improver/spotdata/tests/test_neighbour_finding.py create mode 100644 lib/improver/spotdata/times.py create mode 100644 lib/improver/spotdata/write_output.py diff --git a/lib/improver/constants.py b/lib/improver/constants.py new file mode 100644 index 0000000000..2245304b5e --- /dev/null +++ b/lib/improver/constants.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Module to contain generally useful constants.""" + +# Real Missing Data Indicator +RMDI = -32767.0 + +# Specific gas constant for dry air (J K-1 kg-1) +R_DRY_AIR = 287.0 + +# Specific heat capacity of dry air (J K-1 kg-1) +CP_DRY_AIR = 1005.0 + + diff --git a/lib/improver/spotdata/__init__.py b/lib/improver/spotdata/__init__.py new file mode 100644 index 0000000000..631c0ab730 --- /dev/null +++ b/lib/improver/spotdata/__init__.py @@ -0,0 +1,8 @@ +""" +Provides support routines. +""" + +import os + +# Path to the ancillary data. +_root_dir = os.path.dirname(__file__) diff --git a/lib/improver/spotdata/ancillaries.py b/lib/improver/spotdata/ancillaries.py new file mode 100644 index 0000000000..f26cfe4e52 --- /dev/null +++ b/lib/improver/spotdata/ancillaries.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Module to contain generally useful constants."""
+
+# Real Missing Data Indicator
+RMDI = -32767.0
+
+# Specific gas constant for dry air (J K-1 kg-1)
+R_DRY_AIR = 287.0
+
+# Specific heat capacity of dry air (J K-1 kg-1)
+CP_DRY_AIR = 1005.0
+
+
diff --git a/lib/improver/spotdata/__init__.py b/lib/improver/spotdata/__init__.py
new file mode 100644
index 0000000000..631c0ab730
--- /dev/null
+++ b/lib/improver/spotdata/__init__.py
@@ -0,0 +1,8 @@
+"""
+Provides support routines.
+"""
+
+import os
+
+# Path to the ancillary data.
+_root_dir = os.path.dirname(__file__)
diff --git a/lib/improver/spotdata/ancillaries.py b/lib/improver/spotdata/ancillaries.py
new file mode 100644
index 0000000000..f26cfe4e52
--- /dev/null
+++ b/lib/improver/spotdata/ancillaries.py
@@ -0,0 +1,109 @@
+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""
+A module that loads and makes accessible shared data such as orography and
+land masks.
+
+"""
+
+from os import environ as Environ
+from improver.spotdata.read_input import Load
+
+
+def get_ancillary_data(diagnostics):
+    '''
+    Takes in a list of desired diagnostics and determines which ancillary
+    (i.e. non-time dependent) fields are required given their neighbour
+    finding or data extraction methods.
+
+    Args:
+    -----
+    diagnostics: dictionary containing each diagnostic to be processed with
+                 associated options for how they should be produced, e.g.
+                 method of neighbour selection, method of data extraction etc.
+
+    Returns:
+    --------
+    ancillary_data:
+        dictionary containing named ancillary data; the key gives the
+        name and the item is the iris.cube.Cube of data.
+
+    '''
+    ANCILLARY_PATH = Environ.get('ANCILLARY_PATH')
+
+    ancillary_data = {}
+
+    orography = Load('single_file').process(
+        ANCILLARY_PATH + '/orography.nc', 'surface_altitude')
+
+    ancillary_data.update({'orography': orography})
+
+    # Check if the land mask is used for any diagnostics.
+    if any([('land' in diagnostics[key]['neighbour_finding'])
+            for key in diagnostics.keys()]):
+
+        land = Load('single_file').process(
+            ANCILLARY_PATH + '/land_mask.nc', 'land_binary_mask')
+
+        ancillary_data.update({'land': land})
+
+    return ancillary_data
+
+
+# Function that checks the presence of ancillary data when it is used and
+# raises an exception if it is missing.
+
+def data_from_ancillary(ancillary_data, key):
+    '''
+    Check for an iris.cube.Cube of information in the ancillary data
+    dictionary.
+
+    Args:
+    -----
+    ancillary_data : ancillary_data dictionary defined by get_ancillary_data
+                     function.
+    key : name of ancillary field requested.
+
+    Returns:
+    --------
+    iris.cube.Cube.data from the requested ancillary field.
+
+    Raises:
+    -------
+    Exception if the cube has not been loaded.
+
+    '''
+
+    if ancillary_data is not None and ancillary_data[key]:
+        return ancillary_data[key].data
+    else:
+        raise Exception('Ancillary data {} has not been loaded.'.format(key))
diff --git a/lib/improver/spotdata/common_functions.py b/lib/improver/spotdata/common_functions.py
new file mode 100644
index 0000000000..6fecfaa622
--- /dev/null
+++ b/lib/improver/spotdata/common_functions.py
@@ -0,0 +1,240 @@
+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Plugins written for the Improver site specific process chain. + +""" + +import numpy as np +from iris import Constraint +from iris.time import PartialDateTime + + +class ConditionalListExtract(object): + ''' + Performs a numerical comparison, the type selected with method, of data + in an array and returns an array of indices in that data array that + fulfill the comparison. + + Args: + ----- + method : which comparison to make, e.g. not_equal_to. + data : array of values to be filtered. + indices_list : list of indices in the data array that should be + considered. + comparison_value: the value against which numbers in data are to be + compared. + + Returns: + -------- + array_of_indices.tolist(): + a list of the the indices of data values that fulfill the + comparison condition. + ''' + + def __init__(self, method): + self.method = method + + def process(self, data, indices_list, comparison_value): + ''' Call the data comparison method passed in''' + array_of_indices = np.array(indices_list) + function = getattr(self, self.method) + subset = function(data, array_of_indices, comparison_value) + + return array_of_indices[0:2, subset[0]].tolist() + + @staticmethod + def less_than(data, array_of_indices, comparison_value): + ''' Return indices of array for which value < comparison_value ''' + return np.where( + data[[array_of_indices[0], + array_of_indices[1]]] < comparison_value + ) + + @staticmethod + def greater_than(data, array_of_indices, comparison_value): + ''' Return indices of array for which value > comparison_value ''' + return np.where( + data[[array_of_indices[0], + array_of_indices[1]]] > comparison_value + ) + + @staticmethod + def equal_to(data, array_of_indices, comparison_value): + ''' Return indices of array for which value == comparison_value ''' + return np.where( + data[[array_of_indices[0], + array_of_indices[1]]] == comparison_value + ) + + @staticmethod + def not_equal_to(data, array_of_indices, comparison_value): + ''' Return indices of array for which value != comparison_value ''' + return np.where( + data[[array_of_indices[0], + array_of_indices[1]]] != comparison_value + ) + + +# Common shared functions + +def nearest_n_neighbours(i, j, no_neighbours, exclude_self=False): + ''' + Returns a coordinate list of n points comprising the original + coordinate (i,j) plus the n-1 neighbouring points on a cartesian grid. + e.g. 
n = 9 + + (i-1, j-1) | (i-1, j) | (i-1, j+1) + ---------------------------------- + (i, j-1) | (i, j) | (i, j+1) + ---------------------------------- + (i+1, j-1) | (i+1, j) | (i+1, j+1) + + n must be in the sequence (2*d(ij) + 1)**2 where d(ij) is the +- in the + index (1,2,3, etc.); equivalently sqrt(n) is an odd integer and n >= 9. + + exclude_self = True will return the list without the i,j point about which + the list was constructed. + + Args: + ----- + i, j : central coordinate about which to find neighbours. + no_neighbours : no. of neighbours to return (9, 25, 49, etc). + exclude_self : boolean, if True, (i,j) excluded from returned list. + + Returns: + -------- + list of array indices that neighbour the central (i,j) point. + + ''' + # Check n is a valid no. for neighbour finding. + root_no_neighbours = np.sqrt(no_neighbours) + delta_neighbours = (root_no_neighbours - 1)/2 + if not np.mod(delta_neighbours, 1) == 0 or delta_neighbours < 1: + raise ValueError( + 'Invalid neareat no. of neighbours request. N={} is not a valid ' + 'square no. (sqrt N must be odd)'.format(no_neighbours)) + + delta_neighbours = int(delta_neighbours) + n_indices = [(i+a, j+b) + for a in range(-delta_neighbours, delta_neighbours+1) + for b in range(-delta_neighbours, delta_neighbours+1)] + if exclude_self is True: + n_indices.pop(no_neighbours/2) + return np.array( + [np.array(n_indices)[:, 0], np.array(n_indices)[:, 1]] + ).astype(int).tolist() + + +def node_edge_test(node_list, cube): + ''' + Node lists produced using the nearest_n_neighbours function may overspill + the domain of the array from which data is to be extracted. This function + checks whether the cube of interest is a global domain with a wrapped + boundary using the iris.cube.Cube.coord().circular property. In cases of + wrapped boundaries, the neighbouring points addresses are appropriately + modified. Otherwise the points are discarded. + + Args + ---- + node_list : list[[i],[j]] of indices. + cube : the cube for which data will be extracted using the + indices (e.g. cube.data[node_list]). + + Returns + ------- + node_list : modified node_list with points beyond the cube boundary + either changed or discarded as appropriate. + + ''' + + node_list = np.array(node_list) + + for k, coord in enumerate(['y', 'x']): + coord_max = cube.coord(axis=coord).shape[0] + circular = cube.coord(axis=coord).circular + max_list = np.where(node_list[k] >= coord_max)[0].tolist() + min_list = np.where(node_list[k] < 0)[0].tolist() + if circular: + node_list[k, min_list] = node_list[k, min_list] + coord_max + node_list[k, max_list] = node_list[k, max_list] - coord_max + else: + node_list = np.delete(node_list, + np.hstack((min_list, max_list)), 1) + + return node_list.tolist() + + +def get_nearest_coords(cube, latitude, longitude): + ''' + Uses the iris cube method nearest_neighbour_index to find the nearest grid + points to a given latitude-longitude position. + ''' + + i_latitude = (cube.coord(axis='y').nearest_neighbour_index(latitude)) + j_longitude = (cube.coord(axis='x').nearest_neighbour_index(longitude)) + return i_latitude, j_longitude + + +def index_of_minimum_difference(whole_list, subset_list=None): + ''' + Returns the index of the minimum value in a list. + ''' + if subset_list is None: + subset_list = np.arange(len(whole_list)) + return subset_list[np.argmin(abs(whole_list[subset_list]))] + + +def list_entry_from_index(list_in, index_in): + ''' + Extracts index_in element from each list in a list of lists, and returns + as a list. + e.g. 
+ list_in = [[0,1,2],[5,6,7],[8,9,10]] + index_in = 1 + Returns [1,6,9] + + ''' + n_columns = len(list_in) + return [list_in[n_col][index_in] for n_col in range(n_columns)] + + +def datetime_constraint(time_in): + ''' + Constructs an iris equivalence constraint from a python datetime object. + + ''' + return Constraint( + time=PartialDateTime(*[int(time_in.strftime("%{}".format(x))) + for x in + ['Y', 'm', 'd', 'H']]) + ) diff --git a/lib/improver/spotdata/configurations.py b/lib/improver/spotdata/configurations.py new file mode 100644 index 0000000000..5839fd6137 --- /dev/null +++ b/lib/improver/spotdata/configurations.py @@ -0,0 +1,156 @@ +""" +Test data extraction configuration + +""" +from os import environ as Environ +DIAGNOSTIC_FILE_PATH = Environ.get('DIAGNOSTIC_FILE_PATH') + + +def all_diagnostics(): + ''' + Defines how all available diagnostics should be processed. A custom name + used to key the returned dictionary allows for multiple variations on the + derivation of any one diagnostic for different products. + + e.g. + temperature - might use intelligent grid point neighbour finding to help + near the coasts, and use a model_level_temperature_lapse_rate + to adjust the extracted data for unresolved topography. + + temperature_simple - might use simple nearest neighbour finding and + use_nearest data extraction to simply take the value + from that neighbouring point. + + ''' + diagnostic_recipes = { + 'temperature': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'temperature_at_screen_level' + '*'), + 'diagnostic_name': 'air_temperature', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': True + }, + 'temperature_orog': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'temperature_at_screen_level' + '*'), + 'diagnostic_name': 'air_temperature', + 'neighbour_finding': 'default', + 'interpolation_method': 'orography_derived_temperature_lapse_rate', + 'extrema': True + }, + 'wind_speed': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'horizontal_wind_speed_and_direction_at_10m' + '*'), + 'diagnostic_name': 'wind_speed', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + }, + 'wind_direction': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'horizontal_wind_speed_and_direction_at_10m' + '*'), + 'diagnostic_name': 'wind_from_direction', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + }, + 'visibility': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'visibility_at_screen_level' + '*'), + 'diagnostic_name': 'visibility_in_air', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': True + }, + 'relative_humidity': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'relative_humidity_at_screen_level' + '*'), + 'diagnostic_name': 'relative_humidity', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + }, + 'surface_pressure': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'surface_pressure' + '*'), + 'diagnostic_name': 'surface_air_pressure', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + }, + 'low_cloud_amount': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'low_cloud_amount' + '*'), + 'diagnostic_name': 'low_type_cloud_area_fraction', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + }, + 'medium_cloud_amount': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 
'medium_cloud_amount' + '*'), + 'diagnostic_name': 'medium_type_cloud_area_fraction', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + }, + 'high_cloud_amount': { + 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'high_cloud_amount' + '*'), + 'diagnostic_name': 'high_type_cloud_area_fraction', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + }, + 'total_cloud_amount': { + 'filepath': DIAGNOSTIC_FILE_PATH + '/*/*' + 'total_cloud' + '*', + 'diagnostic_name': 'cloud_area_fraction', + 'neighbour_finding': 'default', + 'interpolation_method': 'use_nearest', + 'extrema': False + } + } + return diagnostic_recipes + + +def define_diagnostics(configuration): + ''' + Define the configurations with which spotdata may be run. These + configurations specify which diagnostic definitions to include + when processing for a given product. + + The routine also defines a default method of grid point neighbour + finding diagnostics configurations that simply refer to 'default'. + + Args: + ----- + configuration : A string used as a key in the configuration dictionary + to select the configuration for use. + + Returns: + -------- + Dictionary containing the diagnostics to be processed and the definition + of how to process them. + + ''' + diagnostics = all_diagnostics() + + configuration_dict = { + 'pws_default': + ['temperature', 'wind_speed', 'wind_direction', 'visibility', + 'relative_humidity', 'surface_pressure', 'low_cloud_amount', + 'medium_cloud_amount', 'high_cloud_amount', 'total_cloud_amount'], + + 'short_test': + ['temperature', 'wind_speed'] + } + + neighbour_finding_default = { + 'pws_default': 'fast_nearest_neighbour', + 'short_test': 'fast_nearest_neighbour' + } + + return (neighbour_finding_default[configuration], + dict((key, diagnostics[key]) + for key in configuration_dict[configuration])) diff --git a/lib/improver/spotdata/extract_data.py b/lib/improver/spotdata/extract_data.py new file mode 100644 index 0000000000..7c103dfefe --- /dev/null +++ b/lib/improver/spotdata/extract_data.py @@ -0,0 +1,336 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+"""Gridded data extraction for the Improver site specific process chain."""
+
+import numpy as np
+from iris import FUTURE
+from iris.coords import AuxCoord, DimCoord
+from iris import Constraint
+from iris.cube import Cube, CubeList
+from numpy.linalg import lstsq
+from improver.spotdata.common_functions import (nearest_n_neighbours,
+                                                datetime_constraint,
+                                                node_edge_test)
+from improver.spotdata.ancillaries import data_from_ancillary
+from improver.spotdata.read_input import get_additional_diagnostics
+from improver.constants import (R_DRY_AIR,
+                                CP_DRY_AIR)
+
+FUTURE.cell_datetime_objects = True
+
+
+class ExtractData(object):
+    '''
+    A series of methods for extracting data from grid points to derive
+    diagnostic values at off grid positions.
+
+    '''
+
+    def __init__(self, method='use_nearest'):
+        """
+        The class is called with the desired method to be used in extracting/
+        interpolating data to the site of interest from gridded data.
+
+        """
+
+        self.method = method
+
+    def process(self, cubes, sites, neighbours, forecast_times,
+                additional_data, **kwargs):
+        """
+        Call the correct function to enact the method of data extraction
+        specified. This function also handles multiple timesteps, consolidating
+        the resulting data cubes into an Iris.CubeList.
+
+        Args:
+        -----
+        cubes:   iris.cube.CubeList of diagnostic data spanning available
+                 times.
+        sites:   Dictionary of site data, including lat/lon and altitude
+                 information.
+        forecast_times:
+                 A list of datetime objects representing forecast times for
+                 which data is required.
+        neighbours: A list of neighbouring grid points that corresponds to
+                 sites in the SortedDictionary of sites.
+        additional_data:
+                 A dictionary containing any supplementary time varying
+                 diagnostics that are needed for the selected extraction
+                 method.
+        ancillary_data:
+                 A dictionary containing additional model data that
+                 is needed. e.g. {'orography': <cube of orography>}
+
+        Returns:
+        --------
+        resulting_cubes:
+                 An iris.CubeList of irregular (i.e. non-gridded) cubes of
+                 data that correspond to the sites of interest at the times
+                 of interest.
+        """
+
+        if forecast_times is None:
+            raise Exception("No forecast times provided.")
+
+        resulting_cubes = CubeList()
+        function = getattr(self, self.method)
+        for a_time in forecast_times:
+            time_extract = datetime_constraint(a_time)
+            cube_in, = cubes.extract(time_extract)
+
+            if additional_data is not None:
+                for key in additional_data.keys():
+                    ad_time, = additional_data[key].extract(time_extract)
+                    kwargs.update({key: ad_time})
+
+            resulting_cubes.append(
+                function(cube_in, sites, neighbours, **kwargs)
+                )
+
+        return resulting_cubes
+
+    @staticmethod
+    def _build_coordinates(latitudes, longitudes, site_ids, gmtoffsets):
+        '''
+        Construct coordinates for the irregular iris.Cube containing site data.
+        A single dimensional coordinate is created using the running order,
+        whilst the non-monotonically increasing coordinates (e.g. bestdata_id)
+        are stored in AuxiliaryCoordinates.
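The distinction matters because iris requires DimCoord points to be monotonic, which arbitrary site IDs are not. A minimal sketch with illustrative values:

from iris.coords import AuxCoord, DimCoord

site_ids = [3021, 17, 998]  # illustrative, non-monotonic site IDs
aux = AuxCoord(site_ids, long_name='bestdata_id', units='1')  # accepted
try:
    DimCoord(site_ids, long_name='bestdata_id')
except ValueError as err:
    print(err)  # DimCoord insists on strictly monotonic points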
+ + Args: + ----- + latitudes : A list of latitudes ordered to correspond with the sites + OrderedDict. + longitudes : A list of longitudes ordered to correspond with the sites + OrderedDict. + site_ids : A list of bestdata site_ids ordered to correspond with the + sites OrderedDict. + gmtoffsets : A list of gmt off sets in hours ordered to correspond with + the sites OrderedDict. + + Returns: + -------- + Creates iris.DimCoord and iris.AuxCoord objects from the provided data + for use in constructing new cubes. + ''' + indices = DimCoord(np.arange(len(latitudes)), long_name='index', + units='1') + bd_ids = AuxCoord(site_ids, long_name='bestdata_id', units='1') + latitude = AuxCoord(latitudes, standard_name='latitude', + units='degrees') + longitude = AuxCoord(longitudes, standard_name='longitude', + units='degrees') + gmtoffset = AuxCoord(gmtoffsets, long_name='gmtoffset', + units='hours') + return indices, bd_ids, latitude, longitude, gmtoffset + + def make_cube(self, cube, data, sites): + ''' + Construct and return a cube containing the data extracted from the + grids by the desired method for the sites provided. + + ''' + latitudes = [site['latitude'] for site in sites.itervalues()] + longitudes = [site['longitude'] for site in sites.itervalues()] + gmtoffsets = [site['gmtoffset'] for site in sites.itervalues()] + site_ids = sites.keys() + + indices, bd_ids, latitude, longitude, gmtoffset = ( + self._build_coordinates( + latitudes, longitudes, site_ids, gmtoffsets)) + + # Add leading dimension for time. + data.resize(1, len(data)) + result_cube = Cube(data, + long_name=cube.name(), + dim_coords_and_dims=[(cube.coord('time'), 0), + (indices, 1)], + aux_coords_and_dims=[(latitude, 1), + (longitude, 1), + (gmtoffset, 1), + (bd_ids, 1)], + units=cube.units) + + # Enables use of long_name above for any name, and then moves it + # to a standard name if possible. + result_cube.rename(cube.name()) + return result_cube + + def use_nearest(self, cube, sites, neighbours, ancillary_data=None): + ''' + Simplest case, in which the diagnostic data value at the nearest grid + point, as determined by the chosen PointSelection method, is used for + the site. + + ''' + if (not cube.coord_dims(cube.coord(axis='y').name())[0] == 0 or + not cube.coord_dims(cube.coord(axis='x').name())[0] == 1): + raise Exception("Cube dimensions not as expected.") + + data = cube.data[neighbours['i'], neighbours['j']] + return self.make_cube(cube, data, sites) + + def orography_derived_temperature_lapse_rate(self, cube, sites, neighbours, + ancillary_data=None): + ''' + Crude lapse rate method that uses temperature variation and height + variation across local nodes to derive lapse rate. This is highly + prone to noise given the small number of points involved and the + variable degree to which elevation changes across the small number + of points. + + ''' + def local_lapse_rate(cube, orography, node_list): + ''' + Least-squares fit to local temperature and altitude data for grid + points defined by node_list to calculate a local lapse rate. 
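The fit itself is ordinary least squares on (altitude, temperature) pairs, exactly as in the body below; a standalone sketch with illustrative data:

import numpy as np
from numpy.linalg import lstsq

altitude = np.array([10.0, 50.0, 120.0, 200.0])       # m
temperature = np.array([288.0, 287.7, 287.3, 286.8])  # K
matrix = np.vstack([altitude, np.ones(len(altitude))]).T
gradient, intercept = lstsq(matrix, temperature)[0]
print(round(gradient, 4))  # -0.0062 K/m, roughly -6.2 K/km
site_temperature = gradient * 80.0 + intercept  # adjust to an 80 m site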
+ + ''' + y_data = cube.data[node_list] + x_data = orography[node_list] + matrix = np.vstack([x_data, np.ones(len(x_data))]).T + gradient, intercept = lstsq(matrix, y_data)[0] + return [gradient, intercept] + + orography = data_from_ancillary(ancillary_data, 'orography') + data = np.empty(shape=(len(sites))) + + for i_site, site in enumerate(sites.itervalues()): + altitude = site['altitude'] + + i, j = neighbours['i'][i_site], neighbours['j'][i_site] + edgecase = neighbours['edge'] + node_list = nearest_n_neighbours(i, j, 9) + if edgecase: + node_list = node_edge_test(node_list, cube) + + llr = local_lapse_rate(cube, orography, node_list) + data[i_site] = llr[0]*altitude + llr[1] + + return self.make_cube(cube, data, sites) + + def model_level_temperature_lapse_rate(self, cube, sites, neighbours, + ancillary_data=None, + pressure_on_height_levels=None, + surface_pressure=None, + temperature_on_height_levels=None): + ''' + Lapse rate method based on potential temperature. Follows the work of + S.B. Vosper 2005 (Near-surface temperature variations over complex + terrain). + + ''' + if (pressure_on_height_levels is None or + surface_pressure is None or + temperature_on_height_levels is None): + raise Exception( + "Required additional data is unset: \n" + "pressure_on_height_levels = {}\n" + "temperature_on_height_levels = {}\n" + "surface_pressure = {}\n".format( + pressure_on_height_levels, + temperature_on_height_levels, + surface_pressure) + ) + + pressure_on_height_levels.convert_units('hPa') + surface_pressure.convert_units('hPa') + + h50con = Constraint(height=50) + t50 = temperature_on_height_levels.extract(h50con) + p50 = pressure_on_height_levels.extract(h50con) + Kappa = R_DRY_AIR/CP_DRY_AIR + + data = np.empty(shape=(len(sites))) + for i_site in range(len(sites)): + + i, j, dz = (neighbours['i'][i_site], neighbours['j'][i_site], + neighbours['dz'][i_site]) + + # Use neighbour grid point value if vertical displacement=0. + if dz == 0.: + data[i_site] = cube.data[i, j] + continue + + t_upper = t50.data[i, j] + p_upper = p50.data[i, j] + t_surface = cube.data[i, j] + p_surface = surface_pressure.data[i, j] + + p_grad = (p_upper - p_surface)/50. + p_site = p_surface + p_grad*dz + + theta_upper = t_upper*(1000./p_upper)**Kappa + theta_surface = t_surface*(1000./p_surface)**Kappa + dthetadz = (theta_upper - theta_surface)/50. + + if abs(dz) < 1.: + t1p5 = t_surface + else: + dz = min(abs(dz), 70.)*np.sign(dz) + if dthetadz > 0: + t1p5 = theta_surface*(p_site/1000.)**Kappa + else: + t1p5 = (theta_surface + dz*dthetadz)*(p_site/1000.)**Kappa + + data[i_site] = t1p5 + + return self.make_cube(cube, data, sites) + + +def get_method_prerequisites(method): + ''' + Determine which additional diagnostics are required for a given + method of data extraction. + + Args: + ----- + method : The method of data extraction that is being used. + + Returns: + -------- + ad : A dictionary keyed with the diagnostic names and containing the + additional cubes that are required. 
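+
+    For example, for the 'model_level_temperature_lapse_rate' method the
+    returned dictionary is keyed with 'temperature_on_height_levels',
+    'pressure_on_height_levels' and 'surface_pressure'; for any other
+    method None is returned.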
+ + ''' + if method == 'model_level_temperature_lapse_rate': + additional_diagnostics = [ + 'temperature_on_height_levels', + 'pressure_on_height_levels', + 'surface_pressure'] + else: + return None + + ad = {} + for item in additional_diagnostics: + ad.update({item: get_additional_diagnostics(item)}) + return ad diff --git a/lib/improver/spotdata/extrema.py b/lib/improver/spotdata/extrema.py new file mode 100644 index 0000000000..b7e974dab8 --- /dev/null +++ b/lib/improver/spotdata/extrema.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Gridded data extraction for the Improver site specific process chain.""" + +import numpy as np +from iris.analysis import MAX as IMAX +from iris.analysis import MIN as IMIN +from iris import FUTURE +from improver.spotdata.write_output import WriteOutput + +FUTURE.cell_datetime_objects = True + + +class ExtractExtrema(object): + '''Extract diagnostic maxima and minima in a given time period.''' + + def __init__(self, method): + """ + The class is called with the desired method, in this case the period + over which to calculate the extrema values. + + This all needs to be updated to work properly with local times if that + is desirable, and to present additional options. And to actually + function as advertised. + + INCOMPLETE. + + """ + self.method = method + + def process(self, cube): + '''Call the required method''' + function = getattr(self, self.method) + function(cube) + + @staticmethod + def In24hr(cube): + ''' + Calculate extrema values for diagnostic in cube over 24 hour period. + + Args: + ----- + cube : Cube of diagnostic data. + + Returns: + -------- + Nil. Writes out cube of extrema data. 
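+
+        A minimal usage sketch (placeholder cube name)::
+
+            ExtractExtrema('In24hr').process(temperature_cube)
+            # Writes out '<diagnostic name>_max' and '<diagnostic name>_min'
+            # cubes via WriteOutput('as_netcdf').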
+ + ''' + cube.coord('time').points = cube.coord('time').points.astype(np.int64) + + cube_max = cube.collapsed('time', IMAX) + cube_min = cube.collapsed('time', IMIN) + + cube_max.long_name = cube_max.name() + '_max' + cube_min.long_name = cube_min.name() + '_min' + cube_max.standard_name = None + cube_min.standard_name = None + + cube.coord('time').points = cube.coord('time').points.astype(np.int32) + + WriteOutput('as_netcdf').process(cube_max) + WriteOutput('as_netcdf').process(cube_min) + +# def local_dates_in_cube(cube): +# ''' +# Incomplete work on using local date time information. +# +# OUT OF DATE AND INCOMPLETE. +# ''' +# from datetime import timedelta as timedelta +# +# dates_in_cube = unit.num2date( +# b.coord('time').points, b.coord('time').units.name, +# b.coord('time').units.calendar) +# +# start_time = dates_in_cube[0] - timedelta(hours=12) +# if start_time.hour < 18: +# start_day = start_time.date() +# else: +# start_day = dates_in_cube[0].date() +# +# end_time = (dates_in_cube[-1] + timedelta(hours=12)).date() diff --git a/lib/improver/spotdata/framework.py b/lib/improver/spotdata/framework.py new file mode 100644 index 0000000000..9888f72da3 --- /dev/null +++ b/lib/improver/spotdata/framework.py @@ -0,0 +1,242 @@ +""" +The framework for site specific post-processing. + +""" + +import argparse +import multiprocessing as mp +from os import environ as Environ + +from improver.spotdata.read_input import Load +from improver.spotdata.neighbour_finding import PointSelection +from improver.spotdata.extract_data import (ExtractData, + get_method_prerequisites) +from improver.spotdata.write_output import WriteOutput +from improver.spotdata.ancillaries import get_ancillary_data +from improver.spotdata.extrema import ExtractExtrema +from improver.spotdata.site_data import ImportSiteData +from improver.spotdata.times import get_forecast_times +from improver.spotdata.configurations import define_diagnostics + + +def run_framework(config_name, latitudes=None, longitudes=None, + altitudes=None, site_ids=None, forecast_date=None, + forecast_time=None, forecast_length=None, + use_multiprocessing=False): + ''' + A framework that calls the components of the spotdata code. This includes + building site data into a suitable format, finding grid neighbours to + those sites with the chosen method, and then extracting data with the + chosen method. The final results are written out to new irregularly + gridded iris.cube.Cubes. + + Args: + ----- + config_name : A string giving the chosen configuration with which + to run the spotdata system. e.g. pws_default which + will produce the required diagnostics for this + product. + latitudes : A list of latitudes for running on the fly for a + custom set of sites. The order should correspond + to the subsequent latitudes and altitudes variables + to construct each site. + longitudes : A list of longitudes for running on the fly for a + custom set of sites. + altitudes : A list of altitudes for running on the fly for a + custom set of sites. + site_ids : A list of site_ids to associate with the above on + the fly constructed sites. This must be ordered the + same as the latitudes/longitudes/altitudes lists. + forecast_date : A string of format YYYYMMDD defining the start date + for which forecasts are required. + forecast_time : An integer giving the hour on the forecast_date at + which to start the forecast output; 24hr clock such + that 17 = 17Z for example. 
+ forecast_length : An integer giving the desired length of the forecast + output in hours (e.g. 48 for a two day forecast + period). + use_multiprocessing : A boolean determining whether to use multiprocessing + in the data extraction component of the code. + + Returns: + -------- + Nil. + + ''' + + # Establish forecast time list based upon input specifications, or if not + # provided, use defaults. + forecast_times = get_forecast_times(forecast_date=forecast_date, + forecast_time=forecast_time, + forecast_length=forecast_length) + + # If using locations set at command line, set optional information such + # as site altitude and site_id. + if latitudes and longitudes: + optionals = {} + if altitudes is not None: + optionals.update({'altitudes': altitudes}) + if site_ids is not None: + optionals.update({'site_ids': site_ids}) + + sites = ImportSiteData('runtime_list').process(latitudes, longitudes, + **optionals) + + # Clumsy implementation of grabbing the BD pickle file if no sites are + # specified. + if latitudes is None or longitudes is None: + site_path = Environ.get('SITE_PATH') + site_path = (site_path + '/bestdata2_locsDB.pkl') + sites = ImportSiteData('pickle_file').process(site_path) + + # Use the selected config to estabilish which diagnostics are required. + # Also gets the default method of selecting grid point neighbours for the + # given configuration. + neighbour_finding_default, diagnostics = define_diagnostics(config_name) + + # Load ancillary data files; fields that don't vary in time. + ancillary_data = get_ancillary_data(diagnostics) + + # Construct a set of neighbour_finding methods to be used in this run. + neighbour_schemes = list( + set([diagnostics[x]['neighbour_finding'] + for x in diagnostics.keys()])) + neighbour_schemes.remove('default') + + # Set up site-grid point neighbour list using default method. Other IGPS + # methods will use this as a starting point so it must always be done. + neighbours = {} + neighbours.update( + {'default': + PointSelection(neighbour_finding_default).process( + ancillary_data['orography'], sites, + ancillary_data=ancillary_data) + }) + + # Set up site-grid point neighbour lists for all IGPS methods being used. + for scheme in neighbour_schemes: + neighbours.update( + {scheme: + PointSelection(scheme).process( + ancillary_data['orography'], sites, + ancillary_data=ancillary_data, + default_neighbours=neighbours['default']) + }) + + if use_multiprocessing: + # Process diagnostics on separate threads is multiprocessing is + # selected. Determine number of diagnostics to establish + # multiprocessing pool size. + n_diagnostic_threads = min(len(diagnostics.keys()), mp.cpu_count()) + + # Establish multiprocessing pool - each diagnostic processed on its + # own thread. + diagnostic_pool = mp.Pool(processes=n_diagnostic_threads) + + for key in diagnostics.keys(): + diagnostic = diagnostics[key] + diagnostic_pool.apply_async( + process_diagnostic, + args=( + diagnostic, neighbours, sites, forecast_times, + ancillary_data)) + + diagnostic_pool.close() + diagnostic_pool.join() + + else: + # Process diagnostics serially on one thread. + for key in diagnostics.keys(): + diagnostic = diagnostics[key] + process_diagnostic(diagnostic, neighbours, sites, forecast_times, + ancillary_data) + + +def process_diagnostic(diagnostic, neighbours, sites, forecast_times, + ancillary_data): + ''' + Extract data and write output for a given diagnostic. + + Args: + ----- + diagnostic : String naming the diagnostic to be processed. 
+ neighbours : Dictionary of gridpoint neighbours to each site produced + by the different available neighbour finding methods that + have been used in the chosen configuration. + sites : Dictionary of spotdata sites to process. + forecast_times : Python datetime objects specifying the times for which + forecast diagnostics are required. + ancillary_data : Dictionary of time invariant fields that may be used by + the data extraction methods (e.g. orography). + + Returns: + -------- + Nil. + + ''' + # print 'neighbour finding with ', diagnostic['neighbour_finding'] + # print 'using interpolation method ', diagnostic['interpolation_method'] + + data = Load('multi_file').process(diagnostic['filepath'], + diagnostic['diagnostic_name']) + neighbour_list = neighbours[diagnostic['neighbour_finding']] + + additional_data = get_method_prerequisites( + diagnostic['interpolation_method']) + + cubes_out = ExtractData( + diagnostic['interpolation_method'] + ).process(data, sites, neighbour_list, forecast_times, + additional_data, ancillary_data=ancillary_data) + + cube_out, = cubes_out.concatenate() + + if diagnostic['extrema']: + ExtractExtrema('In24hr').process(cube_out) + + WriteOutput('as_netcdf').process(cube_out) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='SSPS.') + parser.add_argument('config_name', + help='Configuration to use, defining which diagnostics' + ' to produce.' + ) + parser.add_argument('--latitudes', type=int, choices=range(-90, 90), + nargs='+', + help='Latitude of site of interest.' + ) + parser.add_argument('--longitudes', type=int, choices=range(-180, 180), + nargs='+', + help='Longitude of site of interest.' + ) + parser.add_argument('--altitudes', type=float, nargs='+', + help='Altitude of site of interest.' + ) + parser.add_argument('--site_ids', type=float, nargs='+', + help='ID no. for sites can be set if desired.' + ) + parser.add_argument('--start_date', type=str, + help='Start date of forecast in format YYYYMMDD ' + '(e.g. 20170327 = 27th March 2017).' + ) + parser.add_argument('--start_time', type=int, + help='Starting hour in 24hr clock of forecast. ' + '(e.g. 3 = 03Z, 14 = 14Z).' + ) + parser.add_argument('--length', type=int, + help='Length of forecast in hours.' + ) + parser.add_argument('--multiprocess', type=bool, + help='Process diagnostics using multiprocessing.' + ) + + args = parser.parse_args() + + run_framework(args.config_name, latitudes=args.latitudes, + longitudes=args.longitudes, altitudes=args.altitudes, + site_ids=args.site_ids, + forecast_date=args.start_date, forecast_time=args.start_time, + forecast_length=args.length, + use_multiprocessing=args.multiprocess) diff --git a/lib/improver/spotdata/neighbour_finding.py b/lib/improver/spotdata/neighbour_finding.py new file mode 100644 index 0000000000..d2b6808d33 --- /dev/null +++ b/lib/improver/spotdata/neighbour_finding.py @@ -0,0 +1,445 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +"""Neighbour finding for the Improver site specific process chain.""" + +import numpy as np +import cartopy.crs as ccrs +from iris.analysis.trajectory import interpolate +from improver.spotdata.ancillaries import data_from_ancillary +from improver.spotdata.common_functions import (ConditionalListExtract, + nearest_n_neighbours, + get_nearest_coords, + index_of_minimum_difference, + list_entry_from_index, + node_edge_test) + + +class PointSelection(object): + """ + For the selection of source data from a grid for use in deriving + conditions at an arbitrary coordinate. + + """ + + def __init__(self, method='default'): + """neighbour_list = find_nearest_neighbours(cube, spot_sites) + The class is called with the desired method to be used in determining + the grid point closest to sites of interest. + + """ + self.method = method + + def process(self, cube, sites, **kwargs): + """ + Call the correct function to enact the method of PointSelection + specified. + + """ + function = getattr(self, self.method) + return function(cube, sites, **kwargs) + + @staticmethod + def fast_nearest_neighbour(cube, sites, ancillary_data=None): + ''' + Use iris coord.nearest_neighbour_index function to locate the nearest + grid point to the given latitude/longitude pair. + + Performed on a 2D-surface; consider using the much slower + iris.analysis.trajectory.interpolate method for a more correct nearest + neighbour search with projection onto a spherical surface; this is + typically much slower. + + Args: + ----- + cube : Iris cube of gridded data. + sites : Dictionary of site data, including lat/lon and + altitude information. + e.g. {: {'latitude': 50, 'longitude': 0, + 'altitude': 10}} + ancillary_data : A dictionary containing additional model data that + is needed. e.g. {'orography': } + + Returns: + -------- + neighbours: Numpy array of grid i,j coordinates that are nearest to + each site coordinate given. Includes height difference + between site and returned grid point if orography is + provided. 
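+
+        The returned array has dtype [('i', 'i8'), ('j', 'i8'),
+        ('dz', 'f8'), ('edge', 'bool_')], so a single entry might look
+        like (15, 10, 2.5, False) (illustrative values).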
+ + ''' + if ancillary_data is not None and ancillary_data['orography']: + calculate_dz = True + orography = data_from_ancillary(ancillary_data, 'orography') + else: + calculate_dz = False + + neighbours = np.empty(len(sites), dtype=[('i', 'i8'), + ('j', 'i8'), + ('dz', 'f8'), + ('edge', 'bool_')]) + + # Check cube coords are lat/lon, else transform lookup coordinates. + trg_crs = xy_test(cube) + + imax = cube.coord(axis='y').shape[0] + jmax = cube.coord(axis='x').shape[0] + + for i_site, site in enumerate(sites.itervalues()): + latitude, longitude, altitude = (site['latitude'], + site['longitude'], + site['altitude']) + + longitude, latitude = xy_transform(trg_crs, latitude, longitude) + i_latitude, j_longitude = get_nearest_coords(cube, latitude, + longitude) + + dz_site_grid = 0. + if calculate_dz: + dz_site_grid = altitude - orography[i_latitude, j_longitude] + + neighbours[i_site] = (int(i_latitude), int(j_longitude), + dz_site_grid, + (i_latitude == imax or j_longitude == jmax)) + + return neighbours + +# @staticmethod +# def nearest_neighbour(cube, sites, ancillary_data=None): +# ''' +# Uses the +# iris.analysis._interpolate_private._nearest_neighbour_indices_ndcoords +# function to locate the nearest grid point to the given latitude/ +# longitude pair, taking into account the projection of the cube. +# +# Method is equivalent to extracting data directly with +# iris.analysis.trajectory.interpolate method, which calculates nearest +# neighbours using great arcs on a spherical surface. Using the private +# function we are able to get the list of indices for reuse by multiple +# diagnostics. +# +# Args: +# ----- +# cube : Iris cube of gridded data. +# sites : Dictionary of site data, including lat/lon and +# altitude information. +# ancillary_data : A dictionary containing additional model data that +# is needed. e.g. {'orography': } +# +# Returns: +# -------- +# neighbours: Numpy array of grid i,j coordinates that are nearest to +# each site coordinate given. Includes height difference +# between site and returned grid point if orography is +# provided. +# +# ''' +# if ancillary_data is not None and ancillary_data['orography']: +# calculate_dz = True +# orography = data_from_ancillary(ancillary_data, 'orography') +# else: +# calculate_dz = False +# +# neighbours = np.empty(len(sites), dtype=[('i', 'i8'), +# ('j', 'i8'), +# ('dz', 'f8')]) +# +# # Check cube coords are lat/lon, else transform lookup coordinates. 
+# trg_crs = xy_test(cube) +# +# spot_sites = [('latitude', +# [sites[key]['latitude'] for key in sites.keys()]), +# ('longitude', +# [sites[key]['longitude'] for key in sites.keys()])] +# +# spot_orography = interpolate(cube, spot_sites, method='nearest') +# +# cube_lats = cube.coord(axis='y').points +# spot_lats = spot_orography.coord('latitude').points +# +# cube_lons = cube.coord(axis='x').points +# spot_lons = spot_orography.coord('longitude').points +# +# int_ind_i = [] +# int_ind_j = [] +# for point in spot_lats: +# indices_lat = (np.where(point == cube_lats)[0][0]) +# int_ind_i.append(indices_lat) +# for point in spot_lons: +# indices_lon = (np.where(point == cube_lons)[0][0]) +# int_ind_j.append(indices_lon) +# i_indices = int_ind_i +# j_indices = int_ind_j +# +# # i_indices, j_indices = zip(*[(i, j) for _, i, j in neighbour_list]) +# +# dz = [0] * len(neighbour_list) +# if calculate_dz: +# altitudes = [sites[key]['altitude'] for key in sites.keys()] +# dz = altitudes - orography[i_indices, j_indices] +# +# neighbours['i'] = i_indices +# neighbours['j'] = j_indices +# neighbours['dz'] = dz +# +# return neighbours + + def minimum_height_error_neighbour(self, cube, sites, + default_neighbours=None, + relative_z=None, + land_constraint=False, + ancillary_data=None): + + ''' + Find the horizontally nearest neighbour, then relax the conditions + to find the neighbouring point in the 9 nearest nodes to the input + coordinate that minimises the height difference. This is typically + used for temperature, where vertical displacement can be much more + important that horizontal displacement in determining the conditions. + + A vertical displacement bias may be applied with the relative_z + keyword; whether to prefer grid points above or below the site, or + neither. + + A land constraint may be applied that requires a land grid point be + selected for a site that is over land. Currently this is established + by checking that the nearest grid point barring any other conditions + is a land point. If a site is a sea point it will use the nearest + neighbour as there should be no vertical displacement difference with + other sea points. + + Args: + ----- + cube : Iris cube of gridded data. + sites : Dictionary of site data, including lat/lon and + altitude information. + relative_z : Sets the preferred vertical displacement of the grid + point relative to the site; above/below/None. + land_constraint: A boolean that determines if land sites should only + select from grid points also over land. + ancillary_data : A dictionary containing additional model data that + is needed. + Must contain {'orography': }. + Needs {'land': } if using land + constraint. + + Returns: + -------- + neighbours: Numpy array of grid i,j coordinates that are nearest to + each site coordinate given. Includes height difference + between site and returned grid point. + + ''' + # Use the default nearest neighbour list as a starting point, and + # if for some reason it is missing, recreate the list using the fast + # method. 
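+        # Illustrative example (hypothetical numbers): a site at 120 m whose
+        # nearest grid point lies at 40 m (dz_nearest = 80 m) may be swapped
+        # for a neighbouring point at 110 m (dz = 10 m), subject to the
+        # relative_z bias and the optional land constraint applied below.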
+ if default_neighbours is None: + neighbour_list = self.fast_nearest_neighbour(cube, sites, + ancillary_data) + else: + neighbour_list = default_neighbours + + orography = data_from_ancillary(ancillary_data, 'orography') + if land_constraint: + land = data_from_ancillary(ancillary_data, 'land') + + for i_site, site in enumerate(sites.itervalues()): + altitude = site['altitude'] + + i, j = neighbour_list['i'][i_site], neighbour_list['j'][i_site] + edgecase = neighbour_list['edge'][i_site] + + node_list = nearest_n_neighbours(i, j, 9) + if edgecase: + node_list = node_edge_test(node_list, cube) + + if land_constraint: + # Check that we are considering a land point and that at least + # one neighbouring point is also land. If not no modification + # is made to the nearest neighbour coordinates. + + exclude_self = nearest_n_neighbours(i, j, 9, exclude_self=True) + if edgecase: + exclude_self = node_edge_test(exclude_self, cube) + if not land[i, j] or not any(land[exclude_self]): + continue + + node_list = ConditionalListExtract('not_equal_to').process( + land, node_list, 0) + + dz_nearest = abs(altitude - orography[i, j]) + dzs = altitude - orography[node_list] + + dzs, dz_nearest, dz_subset = apply_bias( + relative_z, dzs, dz_nearest, altitude, orography, i, j) + + ij_min = index_of_minimum_difference(dzs, subset_list=dz_subset) + i_min, j_min = list_entry_from_index(node_list, ij_min) + dz_min = abs(altitude - orography[i_min, j_min]) + + if dz_min < dz_nearest: + neighbour_list[i_site] = i_min, j_min, dzs[ij_min], edgecase + + return neighbour_list + +# Wrapper routines to use the dz minimisation routine with various options. +# These can be called as methods and set in the diagnostic configs. +# It may be better to simply use the keyword options at a higher level, +# but that will make the config more complex. + + def min_dz_no_bias(self, cube, sites, **kwargs): + ''' Return local grid neighbour with minimum vertical displacement''' + return self.minimum_height_error_neighbour(cube, sites, + relative_z=None, + **kwargs) + + def min_dz_biased_above(self, cube, sites, **kwargs): + ''' + Return local grid neighbour with minimum vertical displacement, + biased to select grid points above the site altitude. + + ''' + return self.minimum_height_error_neighbour(cube, sites, + relative_z='above', + **kwargs) + + def min_dz_biased_below(self, cube, sites, **kwargs): + ''' + Return local grid neighbour with minimum vertical displacement, + biased to select grid points below the site altitude. + + ''' + return self.minimum_height_error_neighbour(cube, sites, + relative_z='below', + **kwargs) + + def min_dz_land_no_bias(self, cube, sites, **kwargs): + ''' + Return local grid neighbour with minimum vertical displacement. + Require land point neighbour if site is a land point. + + ''' + return self.minimum_height_error_neighbour(cube, sites, + relative_z=None, + land_constraint=True, + **kwargs) + + def min_dz_land_biased_above(self, cube, sites, **kwargs): + ''' + Return local grid neighbour with minimum vertical displacement, + biased to select grid points above the site altitude. + Require land point neighbour if site is a land point. + + ''' + return self.minimum_height_error_neighbour(cube, sites, + relative_z='above', + land_constraint=True, + **kwargs) + + def min_dz_land_biased_below(self, cube, sites, **kwargs): + ''' + Return local grid neighbour with minimum vertical displacement, + biased to select grid points below the site altitude. + Require land point neighbour if site is a land point. 
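+
+        (A thin wrapper: equivalent to calling
+        minimum_height_error_neighbour with relative_z='below' and
+        land_constraint=True.)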
+ + ''' + return self.minimum_height_error_neighbour(cube, sites, + relative_z='below', + land_constraint=True, + **kwargs) + + +def apply_bias(relative_z, dzs, dz_nearest, altitude, orography, i, j): + ''' + Bias neighbour selection to look for grid points with an + altitude that is above or below the site if relative_z is + not None. + + ''' + if relative_z == 'above': + dz_subset, = np.where(dzs <= 0) + if dz_nearest > 0: + dz_nearest = 1.E6 + elif relative_z == 'below': + dz_subset, = np.where(dzs >= 0) + if dz_nearest < 0: + dz_nearest = 1.E6 + + if relative_z is None or len(dz_subset) == 0 or len(dz_subset) == len(dzs): + dz_subset = np.arange(len(dzs)) + dz_nearest = abs(altitude - orography[i, j]) + + return dzs, dz_nearest, dz_subset + + +def xy_test(cube): + ''' + Test whether a diagnostic cube is on a latitude/longitude grid or uses an + alternative projection. + + Args: + ----- + cube : A diagnostic cube to test. + + Returns: + -------- + trg_crs : None if the cube data is on a latitude/longitude grid. Otherwise + trg_crs is the coordinate system in a cartopy format. + ''' + trg_crs = None + if (not cube.coord(axis='x').name() == 'longitude' or + not cube.coord(axis='y').name() == 'latitude'): + trg_crs = cube.coord_system().as_cartopy_crs() + return trg_crs + + +def xy_transform(trg_crs, latitude, longitude): + ''' + Transforms latitude/longitude coordinate pairs from a latitude/longitude + grid into an alternative projection defined by trg_crs. + + Args: + ----- + trg_crs : Target coordinate system in cartopy format. + latitude : Latitude coordinate. + longitude : Longitude coordinate. + + Returns: + -------- + x, y : longitude and latitude transformed into the target coordinate + system. + + ''' + if trg_crs is None: + return longitude, latitude + else: + return trg_crs.transform_point(longitude, latitude, + ccrs.PlateCarree()) diff --git a/lib/improver/spotdata/read_input.py b/lib/improver/spotdata/read_input.py new file mode 100644 index 0000000000..e5412fc877 --- /dev/null +++ b/lib/improver/spotdata/read_input.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Plugins written for the Improver site specific process chain. +For reading data files from UM output and site specification input. + +""" + +from iris import load_cube, load +from iris import FUTURE +from iris.cube import CubeList + +FUTURE.netcdf_promote = True + + +class Load(object): + + """Plugin for loading data.""" + + def __init__(self, method): + """ + Simple function that currently takes a filename and loads a netCDF + file. + + """ + self.method = method + + def process(self, filepath, diagnostic): + """ + Simple wrapper for using iris load on a supplied netCDF file. + + Returns + ------- + Cube + A cube containing the data from the netCDF file. + + """ + function = getattr(self, self.method) + return function(filepath, diagnostic) + + @staticmethod + def single_file(filepath, diagnostic): + """ Load and return a single iris.cube.Cube """ + return load_cube(filepath, diagnostic) + + @staticmethod + def multi_file(filepath, diagnostic): + """ Load multiple cubes and return a iris.cube.CubeList """ + return load(filepath, diagnostic) + + +def get_additional_diagnostics(diagnostic_name, time_extract=None): + """ + Load additional diagnostics needed for particular spot data processes. + + Args + ---- + diagnostic_name : The name of the diagnostic to be loaded. Used to find + the relevant file. + time_extract : An iris constraint to extract and return only data from + the desired time. + + Returns + ------- + cube : An iris.cube.CubeList containing the desired diagnostic + data, with a single entry is time_extract is provided. + + """ + from improver.spotdata.configurations import diagnostic_file_path + with FUTURE.context(cell_datetime_objects=True): + cubes = Load('multi_file').process( + diagnostic_file_path + '/*/*' + diagnostic_name + '*', + None) + if time_extract is not None: + cube = cubes.extract(time_extract) + cubes = CubeList() + cubes.append(cube) + return cubes diff --git a/lib/improver/spotdata/site_data.py b/lib/improver/spotdata/site_data.py new file mode 100644 index 0000000000..bb37938cc3 --- /dev/null +++ b/lib/improver/spotdata/site_data.py @@ -0,0 +1,174 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Plugins written for the Improver site specific process chain. + +""" + +import numpy as np +import cPickle +from collections import OrderedDict + + +class ImportSiteData(object): + ''' + Create a dictionary of site information from a variety of sources. + Currently supported are the import of a pickle file with site + information - called with 'pickle_file' + Or lists of properties for sites - called with 'runtime_list'. + + ''' + + def __init__(self, source): + ''' + Class is called with the desired source of site data. The source + may be a pickle file or a runtime_list that is defined on the command + line or in the suite. + + Args: + ----- + source : string setting the source of site data. + + ''' + self.source = source + self.latitudes = None + self.longitudes = None + self.altitudes = None + self.site_ids = None + self.gmtoffsets = None + + def process(self, *args, **kwargs): + '''Call the required method''' + function = getattr(self, self.source) + return function(*args, **kwargs) + + def pickle_file(self, file_path): + ''' + Use a pickle file produced by the current SSPS system. + + ''' + site_data = self.read_pickle_file(file_path) + + self.latitudes = np.array([site.latitude for site in site_data]) + self.longitudes = np.array([site.longitude for site in site_data]) + self.altitudes = np.array([site.altitude for site in site_data]) + self.site_ids = np.array([site.bestdata_id for site in site_data]) + self.gmtoffsets = np.array([site.gmtoffset for site in site_data]) + + return self.construct_site_dictionary() + + @staticmethod + def read_pickle_file(file_path): + ''' + Uses existing bestdata site routines to decode pickle file created + by bestdata2. + + Args: + ----- + file_path : Path to target pickle file. + + Returns: + -------- + bd_site_data : bestdata site class containing site information. + + ''' + try: + with open(file_path, 'rb') as bd_pickle_file: + _ = cPickle.load(bd_pickle_file) + [_, _, bd_site_data, _, _] = (cPickle.load(bd_pickle_file)) + except: + raise Exception("Unable to read pickle file.") + + return bd_site_data + + def runtime_list(self, latitudes, longitudes, + altitudes=None, site_ids=None): + ''' + Use data provided on the command line/controlling suite at runtime. 
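+
+        For example (made-up coordinates)::
+
+            sites = ImportSiteData('runtime_list').process(
+                [50.7, 52.2], [-3.5, 0.1], altitudes=[10., 50.])
+            # site_ids default to 0..n-1 and gmtoffsets are derived from
+            # longitude via set_gmt_offset(), e.g. -3.5 degrees -> 0 hours,
+            # 120.0 degrees -> 8 hours.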
+
+        '''
+        if site_ids is not None:
+            self.site_ids = np.array(site_ids)
+        else:
+            self.site_ids = np.arange(len(latitudes))
+        if altitudes is not None:
+            self.altitudes = np.array(altitudes)
+        else:
+            self.altitudes = np.zeros(len(latitudes))
+
+        self.latitudes = np.array(latitudes)
+        self.longitudes = np.array(longitudes)
+        self.gmtoffsets = set_gmt_offset(self.longitudes)
+
+        return self.construct_site_dictionary()
+
+    def construct_site_dictionary(self):
+        '''
+        Constructs a dictionary of site data regardless of source to give
+        the spotdata routines a consistent source of site data.
+
+        Returns:
+        --------
+        sites : Dictionary of site data.
+
+        '''
+        sites = OrderedDict()
+        for i_site, site_id in enumerate(self.site_ids):
+            if self.gmtoffsets[i_site] is None:
+                self.gmtoffsets[i_site] = 0
+            sites.update(
+                {site_id: {
+                    'latitude': self.latitudes[i_site],
+                    'longitude': self.longitudes[i_site],
+                    'altitude': self.altitudes[i_site],
+                    'gmtoffset': self.gmtoffsets[i_site]
+                    }
+                 })
+        return sites
+
+
+def set_gmt_offset(longitudes):
+    '''
+    Simplistic timezone setting for unset sites that uses 15 degree bins
+    centred on 0 degrees longitude. Used for on-the-fly site generation
+    when no more rigorous source of timezone information is provided.
+
+    Args:
+    -----
+    longitudes : list of longitudes.
+
+    Returns:
+    --------
+    gmtoffsets : list of gmtoffsets calculated using longitude.
+
+    '''
+    return ((longitudes + (7.5*np.sign(longitudes)))/15).astype(int)
diff --git a/lib/improver/spotdata/tests/test_extract_data.py b/lib/improver/spotdata/tests/test_extract_data.py
new file mode 100644
index 0000000000..6e032caf85
--- /dev/null
+++ b/lib/improver/spotdata/tests/test_extract_data.py
@@ -0,0 +1,372 @@
+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Unit tests for the spotdata.ExtractData plugin.""" + + +import unittest +import cf_units + +from datetime import datetime as dt +from iris.coords import (DimCoord, + AuxCoord) +from iris import coord_systems +from iris.coord_systems import GeogCS +from iris.cube import (Cube, + CubeList) +from iris import Constraint +from iris.tests import IrisTest +from iris.time import PartialDateTime +import cartopy.crs as ccrs +from collections import OrderedDict +from iris import FUTURE +import numpy as np + +from improver.spotdata.extract_data import ExtractData + +FUTURE.cell_datetime_objects = True + +class TestExtractData(IrisTest): + + """Test the extract data plugin.""" + + def setUp(self): + """ + Create a cube containing a regular lat-lon grid. + + Data is formatted to increase linearly in x/y dimensions, + e.g. + 0 1 2 3 + 1 2 3 4 + 2 3 4 5 + 3 4 5 6 + + """ + data = np.arange(0,20,1) + for i in range(1,20): + data = np.append(data, np.arange(i, 20+i)) + + data.resize(1, 20, 20) + latitudes = np.linspace(-90, 90, 20) + longitudes = np.linspace(-180, 180, 20) + latitude = DimCoord(latitudes, standard_name='latitude', + units='degrees', coord_system=GeogCS(6371229.0)) + longitude = DimCoord(longitudes, standard_name='longitude', + units='degrees', coord_system=GeogCS(6371229.0)) + + # Use time of 2017-02-17 06:00:00 + time = DimCoord([1487311200], standard_name='time', + units=cf_units.Unit('seconds since 1970-01-01 00:00:00', + calendar='gregorian')) + + time_dt = [dt(2017, 02, 17, 06, 00)] + # time_extract = Constraint(time=PartialDateTime(2017, 02, 17, 06, 00)) + + cube = Cube(data, + long_name="test_data", + dim_coords_and_dims=[(time, 0), + (latitude, 1), + (longitude, 2)], + units="1") + + cubes = CubeList() + cubes.append(cube) + + orography = Cube(np.ones((20,20)), + long_name="surface_altitude", + dim_coords_and_dims=[(latitude, 0), + (longitude, 1)], + units="m") + + # Western half of grid at altitude 0, eastern half at 10. + # Note that the pressure_on_height_levels data is left unchanged, + # so it is as if there is a sharp front running up the grid with + # differing pressures on either side at equivalent heights above + # the surface (e.g. east 1000hPa at 0m AMSL, west 1000hPa at 10m AMSL). + # So there is higher pressure in the west. + orography.data[0:10] = 0 + orography.data[10:] = 10 + ancillary_data = {} + ancillary_data.update({'orography': orography}) + + # Create additional vertical data used to calculate temperature lapse + # rates from model levels. + + Tlevel0 = np.ones((1, 20, 20))*20. + Tlevel1 = np.ones((1, 20, 20))*-20. + Tlevel2 = np.ones((1, 20, 20))*-60. + Tdata = np.vstack((Tlevel0, Tlevel1, Tlevel2)) + Tdata.resize((1, 3, 20, 20)) + + Plevel0 = np.ones((1, 20, 20))*1000. + Plevel1 = np.ones((1, 20, 20))*900. + Plevel2 = np.ones((1, 20, 20))*800. 
+ Pdata = np.vstack((Plevel0, Plevel1, Plevel2)) + Pdata.resize((1, 3, 20, 20)) + + height = DimCoord([0., 50., 100.], standard_name='height', units='m') + + temperature_on_height_levels = CubeList() + temperature_on_height_levels.append( + Cube( + Tdata, + long_name="temperature_on_height_levels", + dim_coords_and_dims=[(time, 0), (height,1), + (latitude, 2), (longitude, 3)], + units="degree_Celsius")) + + pressure_on_height_levels = CubeList() + pressure_on_height_levels.append( + Cube( + Pdata, + long_name="pressure_on_height_levels", + dim_coords_and_dims=[(time, 0), (height,1), + (latitude, 2), (longitude, 3)], + units="hPa")) + + surface_pressure = CubeList() + surface_pressure.append( + Cube( + Pdata[0,0].reshape(1, 20, 20), + long_name="surface_pressure", + dim_coords_and_dims=[(time, 0), (latitude, 1), (longitude, 2)], + units="hPa")) + + ad = {'temperature_on_height_levels': temperature_on_height_levels, + 'pressure_on_height_levels': pressure_on_height_levels, + 'surface_pressure': surface_pressure} + + sites = OrderedDict() + sites.update({'100': {'latitude': 4.74, + 'longitude': 9.47, + 'altitude': 10, + 'gmtoffset': 0 + } + }) + + neighbour_list = np.empty(1, dtype=[('i', 'i8'), + ('j', 'i8'), + ('dz', 'f8'), + ('edge', 'bool_')]) + + neighbour_list[0] = 10, 10, 0, False + + self.cubes = cubes + self.ancillary_data = ancillary_data + self.ad = ad + self.sites = sites + self.neighbour_list = neighbour_list + self.time_dt = time_dt + + + def return_type(self, method, additional_data, **kwargs): + """Test that the plugin returns an iris.cube.CubeList.""" + plugin = ExtractData(method) + result = plugin.process(self.cubes, self.sites, self.neighbour_list, + self.time_dt, additional_data, **kwargs) + + self.assertIsInstance(result, CubeList) + + def extracted_value(self, method, additional_data, expected, **kwargs): + """Test that the plugin returns the correct value.""" + plugin = ExtractData(method) + result = plugin.process(self.cubes, self.sites, self.neighbour_list, + self.time_dt, additional_data, **kwargs) + self.assertAlmostEqual(result[0].data, expected) + + def different_projection(self, method, additional_data, expected, **kwargs): + """Test that the plugin copes with non-lat/lon grids.""" + + trg_crs = None + src_crs = ccrs.PlateCarree() + trg_crs = ccrs.LambertConformal(central_longitude=50, + central_latitude=10) + trg_crs_iris = coord_systems.LambertConformal( + central_lon=50, central_lat=10) + lons = self.cubes[0].coord('longitude').points + lats = self.cubes[0].coord('latitude').points + x, y = [], [] + for lon, lat in zip(lons, lats): + x_trg, y_trg = trg_crs.transform_point(lon, lat, src_crs) + x.append(x_trg) + y.append(y_trg) + + new_x = AuxCoord(x, standard_name='projection_x_coordinate', + units='m', coord_system=trg_crs_iris) + new_y = AuxCoord(y, standard_name='projection_y_coordinate', + units='m', coord_system=trg_crs_iris) + + cube = Cube(self.cubes[0].data, + long_name="test_data", + dim_coords_and_dims=[(self.cubes[0].coord('time'), 0)], + aux_coords_and_dims=[(new_y, 1), (new_x, 2)], + units="1") + + cubes = CubeList() + cubes.append(cube) + + plugin = ExtractData(method) + result = plugin.process(cubes, self.sites, self.neighbour_list, + self.time_dt, additional_data, **kwargs) + + self.assertEqual(cubes[0].coord_system(), trg_crs_iris) + self.assertAlmostEqual(result[0].data, expected) + self.assertEqual(result[0].coord(axis='y').name(), 'latitude') + self.assertEqual(result[0].coord(axis='x').name(), 'longitude') + 
self.assertAlmostEqual(result[0].coord(axis='y').points, 4.74) + self.assertAlmostEqual(result[0].coord(axis='x').points, 9.47) + + + def missing_ancillary_data(self, method, additional_data, **kwargs): + """Test that the plugin copes with missing ancillary data.""" + plugin = ExtractData(method) + msg = "Ancillary data" + with self.assertRaisesRegexp(Exception, msg): + result = plugin.process( + self.cubes, self.sites, self.neighbour_list, + self.time_dt, additional_data, **kwargs) + + def missing_additional_data(self, method, additional_data, **kwargs): + """Test that the plugin copes with missing additional data.""" + plugin = ExtractData(method) + msg = "Required additional data is unset" + with self.assertRaisesRegexp(Exception, msg): + result = plugin.process( + self.cubes, self.sites, self.neighbour_list, + self.time_dt, additional_data, **kwargs) + + +class use_nearest(TestExtractData): + + method = 'use_nearest' + + def test_return_type(self): + self.return_type(self.method, None, ancillary_data=None) + + def test_extracted_value(self): + """Test that the plugin returns the correct value.""" + expected = 20 + self.extracted_value(self.method, None, expected, ancillary_data=None) + + def test_different_projection(self): + """Test that the plugin copes with non-lat/lon grids.""" + expected=20. + self.different_projection(self.method, None, expected, ancillary_data=None) + + +class orography_derived_temperature_lapse_rate(TestExtractData): + + method = 'orography_derived_temperature_lapse_rate' + + def test_return_type(self): + self.return_type(self.method, None, ancillary_data=self.ancillary_data) + + def test_extracted_value(self): + """ + Test that the plugin returns the correct value. + + Fit line given data above is: T = 0.15*altitude + 19 + Site defined with has altitude=10, so T+expected = 20.5. + + """ + expected = 20.5 + self.extracted_value(self.method, None, expected, ancillary_data=self.ancillary_data) + + def test_different_projection(self): + """ + Test that the plugin copes with non-lat/lon grids. + + Cube is transformed into a LambertConformal projection. The usual + latitude/longitude coordinates are used to query the grid, with iris + functionality used to convert the query coordinates to the correct + projection. + + The returned cube has latitude/longitude dimensions. + + The expected value should be the same as the PlateCarree() projection + case above. + + """ + expected=20.5 + self.different_projection(self.method, None, expected, ancillary_data=self.ancillary_data) + + def test_missing_ancillary_data(self): + self.missing_ancillary_data(self.method, None, ancillary_data=None) + + +class model_level_temperature_lapse_rate(TestExtractData): + + method = 'model_level_temperature_lapse_rate' + + def test_return_type(self): + self.return_type(self.method, self.ad, ancillary_data=self.ancillary_data) + + def test_extracted_value(self): + """ + Test that the plugin returns the correct value. + + Site set to be 60m in altitude, which is a dz of +50m from the nearest + grid point (its neighbour). As such it should fall on the 900hPa level + and get a temperature of -20C. + + """ + self.sites['100']['altitude'] = 60. + self.neighbour_list['dz'] = 50. + expected = -20. + self.extracted_value(self.method, self.ad, expected, ancillary_data=self.ancillary_data) + + def test_different_projection(self): + """ + Test that the plugin copes with non-lat/lon grids. + + Cube is transformed into a LambertConformal projection. 
The usual + latitude/longitude coordinates are used to query the grid, with iris + functionality used to convert the query coordinates to the correct + projection. + + The returned cube has latitude/longitude dimensions. + + The expected value should be the same as the PlateCarree() projection + case above. + + """ + self.sites['100']['altitude'] = 60. + self.neighbour_list['dz'] = 50. + expected = -20. + plugin = ExtractData(self.method) + self.different_projection(self.method, self.ad, expected, ancillary_data=self.ancillary_data) + + def test_missing_additional_data(self): + plugin = ExtractData(self.method) + self.missing_additional_data(self.method, None, ancillary_data=self.ancillary_data) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/improver/spotdata/tests/test_neighbour_finding.py b/lib/improver/spotdata/tests/test_neighbour_finding.py new file mode 100644 index 0000000000..c8abcbe84e --- /dev/null +++ b/lib/improver/spotdata/tests/test_neighbour_finding.py @@ -0,0 +1,679 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+"""Unit tests for the spotdata.NeighbourFinding plugin.""" + + +import unittest + +from iris.coords import DimCoord +from iris.cube import Cube +from iris.tests import IrisTest +from collections import OrderedDict +import numpy as np + +from improver.spotdata.neighbour_finding import PointSelection + + +class TestNeighbourFinding(IrisTest): + + """Test the neighbour finding plugin.""" + + def setUp(self): + """Create a cube containing a regular lat-lon grid.""" + data = np.zeros((20, 20)) + latitudes = np.linspace(-90, 90, 20) + longitudes = np.linspace(-180, 180, 20) + latitude = DimCoord(latitudes, standard_name='latitude', + units='degrees') + longitude = DimCoord(longitudes, standard_name='longitude', + units='degrees') + + cube = Cube(data, + long_name="test_data", + dim_coords_and_dims=[(latitude, 0), (longitude, 1)], + units="1") + + orography = cube.copy() + orography.rename('surface_altitude') + land = cube.copy() + land.rename('land_binary_mask') + land.data = land.data + 1 + + ancillary_data = {} + ancillary_data.update({'orography': orography}) + ancillary_data.update({'land': land}) + + sites = OrderedDict() + sites.update({'100': {'latitude': 50, + 'longitude': 0, + 'altitude': 10, + 'gmtoffset': 0 + } + }) + + neighbour_list = np.empty(1, dtype=[('i', 'i8'), + ('j', 'i8'), + ('dz', 'f8'), + ('edge', 'bool_')]) + + self.cube = cube + self.ancillary_data = ancillary_data + self.sites = sites + self.neighbour_list = neighbour_list + + def return_types(self, method): + """Test that the plugin returns a numpy array.""" + plugin = PointSelection(method) + result = plugin.process(self.cube, self.sites, + ancillary_data=self.ancillary_data) + self.assertIsInstance(result, np.ndarray) + self.assertEqual(result.dtype, self.neighbour_list.dtype) + + def correct_neighbour(self, method, i_expected, j_expected, dz_expected): + """Test that the plugin returns the expected neighbour""" + plugin = PointSelection(method) + result = plugin.process(self.cube, self.sites, + ancillary_data=self.ancillary_data) + self.assertEqual(result['i'], i_expected) + self.assertEqual(result['j'], j_expected) + self.assertEqual(result['dz'], dz_expected) + + def without_ancillary_data(self, method): + """Test plugins behaviour with no ancillary data provided""" + plugin = PointSelection(method) + if method == 'fast_nearest_neighbour': + result = plugin.process(self.cube, self.sites) + self.assertIsInstance(result, np.ndarray) + else: + msg = 'Ancillary data' + with self.assertRaisesRegexp(Exception, msg): + result = plugin.process(self.cube, self.sites) + + +class fast_nearest_neighbour(TestNeighbourFinding): + ''' + Tests for fast_nearest_neighbour method. No other conditions beyond + proximity are considered. + + ''' + method = 'fast_nearest_neighbour' + + def test_return_type(self): + '''Ensure a numpy array of the format expected is returned.''' + self.return_types(self.method) + + def test_correct_neighbour(self): + '''Nearest neighbouring grid point with no other conditions''' + self.correct_neighbour(self.method, 15, 10, 10.) + + def test_without_ancillary_data(self): + ''' + Should function without any ancillary fields and return expected type. + ''' + self.without_ancillary_data(self.method) + + +class min_dz_no_bias(TestNeighbourFinding): + ''' + Tests for min_dz_no_bias method. This method seeks to minimise + the vertical displacement between a site and the selected neigbouring + grid point. 
There is no bias as to whether dz is positive (grid point
+    below site) or dz is negative (grid point above site).
+
+    '''
+
+    method = 'min_dz_no_bias'
+
+    def test_return_type(self):
+        '''Ensure a numpy array of the format expected is returned.'''
+        self.return_types(self.method)
+
+    def test_without_ancillary_data(self):
+        '''
+        Ensure an exception is raised if needed ancillary fields are
+        missing.
+        '''
+        self.without_ancillary_data(self.method)
+
+    def test_correct_neighbour_no_orography(self):
+        '''Nearest neighbouring grid point with no other conditions'''
+        self.correct_neighbour(self.method, 15, 10, 10.)
+
+    def test_correct_neighbour_orography(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. No relative altitude bias in selection.
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 10.
+        self.correct_neighbour(self.method, 14, 10, 0.)
+
+    def test_correct_neighbour_orography_equal_displacement(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. No relative altitude bias in selection.
+
+        In this case of equal minimum vertical grid point displacements above
+        and below the site the code will select the first occurrence of this
+        smallest dz that it comes across; (14, 10) is tested before (16, 10).
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 9.
+        self.ancillary_data['orography'].data[16, 10] = 11.
+        self.correct_neighbour(self.method, 14, 10, 1.)
+
+    def test_correct_neighbour_orography_unequal_displacement(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. No relative altitude bias in selection.
+
+        With no vertical displacement bias the smallest of the two unequal
+        dz values is chosen; -1 at (16, 10).
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 8.
+        self.ancillary_data['orography'].data[16, 10] = 11.
+        self.correct_neighbour(self.method, 16, 10, -1.)
+
+
+class min_dz_biased_above(TestNeighbourFinding):
+    '''
+    Tests for min_dz_biased_above. This method seeks to minimise
+    the vertical displacement between a site and the selected neighbouring
+    grid point. There is a bias towards dz being negative (grid point above
+    site), but if this condition cannot be met, a minimum positive dz (grid
+    point below site) neighbour will be returned.
+
+    '''
+
+    method = 'min_dz_biased_above'
+
+    def test_return_type(self):
+        '''Ensure a numpy array of the format expected is returned.'''
+        self.return_types(self.method)
+
+    def test_without_ancillary_data(self):
+        '''
+        Ensure an exception is raised if needed ancillary fields are
+        missing.
+        '''
+        self.without_ancillary_data(self.method)
+
+    def test_correct_neighbour_no_orography(self):
+        '''Nearest neighbouring grid point with no other conditions'''
+        self.correct_neighbour(self.method, 15, 10, 10.)
+
+    def test_correct_neighbour_orography(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. Biased to prefer grid points with relative
+        altitudes above the site if these are available.
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 10.
+        self.correct_neighbour(self.method, 14, 10, 0.)
+
+    def test_correct_neighbour_orography_equal_displacement(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. Biased to prefer grid points with relative
+        altitudes ABOVE the site if these are available.
+ + In this case of equal minimum vertical grid point displacements above + and below the site the code will select the point which obeys the bias + condition; here (16, 10) is ABOVE the site and will be chosen instead + of (14, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 11. + self.correct_neighbour(self.method, 16, 10, -1.) + + def test_correct_neighbour_orography_unequal_displacement(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes ABOVE the site if these are available. + + In this case the minimum vertical grid point displacement 1 at (14, 10) + goes against the selection bias of grid points ABOVE the site. As such + the next nearest dz that fulfils the bias condition is chosen; -2 at + (16, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 12. + self.correct_neighbour(self.method, 16, 10, -2.) + + +class min_dz_biased_below(TestNeighbourFinding): + ''' + Tests for min_dz_biased_below. This method seeks to minimise + the vertical displacement between a site and the selected neigbouring + grid point. There is a bias towards dz being positive (grid point below + site), but if this condition cannot be met, a minimum negative dz (grid + point above site) neighbour will be returned. + + ''' + + method = 'min_dz_biased_below' + + def test_return_type(self): + '''Ensure a numpy array of the format expected is returned.''' + self.return_types(self.method) + + def test_without_ancillary_data(self): + ''' + Ensure an exception is raised if needed ancillary fields are + missing. + ''' + self.without_ancillary_data(self.method) + + def test_correct_neighbour_no_orography(self): + '''Nearest neighbouring grid point with no other conditions''' + self.correct_neighbour(self.method, 15, 10, 10.) + + def test_correct_neighbour_orography(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes below the site if these are available. + ''' + self.ancillary_data['orography'].data[14, 10] = 10. + self.correct_neighbour(self.method, 14, 10, 0.) + + def test_correct_neighbour_orography_equal_displacement(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes BELOW the site if these are available. + + In this case of equal minimum vertical grid point displacements above + and below the site the code will select the point which obeys the bias + condition; here (14, 10) is BELOW the site and will be chosen instead + of (16, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 11. + self.correct_neighbour(self.method, 14, 10, 1.) + + def test_correct_neighbour_orography_unequal_displacement(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes BELOW the site if these are available. + + In this case the minimum vertical grid point displacement -1 at + (16, 10) goes against the selection bias of grid points BELOW the site. + As such the next nearest dz that fulfils the bias condition is chosen; + 2 at (14, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 8. 
+        self.ancillary_data['orography'].data[16, 10] = 11.
+        self.correct_neighbour(self.method, 14, 10, 2.)
+
+
+class min_dz_land_no_bias(TestNeighbourFinding):
+    '''
+    Tests for min_dz_land_no_bias method. This method seeks to
+    minimise the vertical displacement between a site and the selected
+    neighbouring grid point. There is no bias as to whether dz is positive
+    (grid point below site) or dz is negative (grid point above site).
+
+    A neighbouring grid point is REQUIRED to be a land point if the site's
+    first guess nearest neighbour is a land point. If the first guess neighbour
+    is a sea point, the site is assumed to be a sea point as well, and the
+    neighbour point will not be changed.
+
+    '''
+
+    method = 'min_dz_land_no_bias'
+
+    def test_return_type(self):
+        '''Ensure a numpy array of the format expected is returned.'''
+        self.return_types(self.method)
+
+    def test_without_ancillary_data(self):
+        '''
+        Ensure an exception is raised if needed ancillary fields are
+        missing.
+        '''
+        self.without_ancillary_data(self.method)
+
+    def test_correct_neighbour_no_orography(self):
+        '''Nearest neighbouring grid point with no other conditions'''
+        self.correct_neighbour(self.method, 15, 10, 10.)
+
+    def test_correct_neighbour_orography(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. No relative altitude bias in selection.
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 10.
+        self.correct_neighbour(self.method, 14, 10, 0.)
+
+    def test_correct_neighbour_orography_equal_displacement(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. No relative altitude bias in selection.
+
+        In this case of equal minimum vertical grid point displacements above
+        and below the site the code will select the first occurrence of this
+        smallest dz that it comes across; (14, 10) is tested before (16, 10).
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 9.
+        self.ancillary_data['orography'].data[16, 10] = 11.
+        self.correct_neighbour(self.method, 14, 10, 1.)
+
+    def test_correct_neighbour_orography_unequal_displacement(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. No relative altitude bias in selection.
+
+        With no vertical displacement bias the smallest of the two unequal
+        dz values is chosen; -1 at (16, 10).
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 8.
+        self.ancillary_data['orography'].data[16, 10] = 11.
+        self.correct_neighbour(self.method, 16, 10, -1.)
+
+    def test_correct_neighbour_no_orography_land(self):
+        '''
+        Sets nearest grid point to be a sea point. Assumes site is sea point
+        and leaves coordinates unchanged (dz should not vary over the sea).
+
+        '''
+        self.ancillary_data['land'].data[15, 10] = 0.
+        self.correct_neighbour(self.method, 15, 10, 10.)
+
+    def test_correct_neighbour_orography_equal_displacement_land(self):
+        '''
+        Nearest neighbouring grid point condition relaxed to give smallest
+        vertical displacement. No relative altitude bias in selection.
+
+        This test requires a land point be selected. Any grid point not above
+        land is discounted. So (14, 10) is disregarded leaving (16, 10) to give
+        the smallest dz, so this point is returned as the neighbour.
+
+        '''
+        self.ancillary_data['orography'].data[14, 10] = 9.
+        self.ancillary_data['orography'].data[16, 10] = 11.
+        self.ancillary_data['land'].data[14, 10] = 0.
+        self.correct_neighbour(self.method, 16, 10, -1.)
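The rule exercised by these land-constrained tests reduces to masking out sea points before minimising the absolute vertical displacement. A minimal sketch with illustrative values follows; it is not the PointSelection implementation itself.

```python
import numpy as np

# Candidate grid-point altitudes and a land mask around a site at 10 m.
site_altitude = 10.
orography = np.array([9., 20., 11.])
land = np.array([False, True, True])

# dz > 0 means the grid point sits below the site.
dz = site_altitude - orography
candidates = np.flatnonzero(land)           # sea points are discounted
best = candidates[np.argmin(np.abs(dz[candidates]))]
# -> index 2 with dz = -1, despite index 0 having the smaller |dz| overall.
```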
+ + def test_correct_neighbour_orography_unequal_displacement_land(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. A land point is required. No relative altitude + bias in selection. + + (16, 10) is disregarded as it is a sea point. As such (14, 10) is + returned as the neighbour despite its slightly larger dz. + + ''' + self.ancillary_data['orography'].data[14, 10] = 8. + self.ancillary_data['orography'].data[16, 10] = 11. + self.ancillary_data['land'].data[16, 10] = 0. + self.correct_neighbour(self.method, 14, 10, 2.) + + +class min_dz_land_biased_above(TestNeighbourFinding): + ''' + Tests for min_dz_land_biased_above. This method seeks to minimise + the vertical displacement between a site and the selected neigbouring + grid point. There is a bias towards dz being negative (grid point above + site), but if this condition cannot be met, a minimum positive dz (grid + point below site) neighbour will be returned. + + A neighbouring grid point is REQUIRED to be a land point if the site's + first guess nearest neigbour is a land point. If the first guess neighbour + is a sea point, the site is assumed to be a sea point as well the + neighbour point will not be changed. + + ''' + + method = 'min_dz_land_biased_above' + + def test_return_type(self): + '''Ensure a numpy array of the format expected is returned.''' + self.return_types(self.method) + + def test_without_ancillary_data(self): + ''' + Ensure an exception is raised if needed ancillary fields are + missing. + ''' + self.without_ancillary_data(self.method) + + def test_correct_neighbour_no_orography(self): + '''Nearest neighbouring grid point with no other conditions''' + self.correct_neighbour(self.method, 15, 10, 10.) + + def test_correct_neighbour_orography(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes above the site if these are available. + ''' + self.ancillary_data['orography'].data[14, 10] = 10. + self.correct_neighbour(self.method, 14, 10, 0.) + + def test_correct_neighbour_orography_equal_displacement(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes ABOVE the site if these are available. + + In this case of equal minimum vertical grid point displacements above + and below the site the code will select the point which obeys the bias + condition; here (16, 10) is ABOVE the site and will be chosen instead + of (14, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 11. + self.correct_neighbour(self.method, 16, 10, -1.) + + def test_correct_neighbour_orography_unequal_displacement(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes ABOVE the site if these are available. + + In this case the minimum vertical grid point displacement 1 at (14, 10) + goes against the selection bias of grid points ABOVE the site. As such + the next nearest dz that fulfils the bias condition is chosen; -2 at + (16, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 12. + self.correct_neighbour(self.method, 16, 10, -2.) + + def test_correct_neighbour_no_orography_land(self): + ''' + Sets nearest grid point to be a sea point. 
Assumes site is sea point + and leaves coordinates unchanged (dz should not vary over the sea). + + ''' + self.ancillary_data['land'].data[15, 10] = 0. + self.correct_neighbour(self.method, 15, 10, 10.) + + def test_correct_neighbour_orography_equal_displacement_land(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes ABOVE the site if these are available. + + This test requires a land point be selected. Any grid point not above + land is discounted. So (16, 10) is disregarded leaving (14, 10) to give + the smallest dz. This point is returned as the neighbour as no points + fulfill the ABOVE bias condition. + + ''' + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 11. + self.ancillary_data['land'].data[16, 10] = 0. + self.correct_neighbour(self.method, 14, 10, 1.) + + def test_correct_neighbour_orography_unequal_displacement_land(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. A land point is required. Biased to prefer + grid points with relative altitudes ABOVE the site if these are + available. + + This test requires a land point be selected. Any grid point not above + land is discounted. So (16, 10) is disregarded leaving (14, 10) to give + the smallest dz. This point is returned as the neighbour as no other + point fulfills the ABOVE bias condition. + + ''' + self.ancillary_data['orography'].data[14, 10] = 8. + self.ancillary_data['orography'].data[16, 10] = 11. + self.ancillary_data['land'].data[16, 10] = 0. + self.correct_neighbour(self.method, 14, 10, 2.) + + +class min_dz_land_biased_below(TestNeighbourFinding): + ''' + Tests for min_dz_land_biased_below. This method seeks to minimise + the vertical displacement between a site and the selected neigbouring + grid point. There is a bias towards dz being positive (grid point below + site), but if this condition cannot be met, a minimum negative dz (grid + point above site) neighbour will be returned. + + A neighbouring grid point is REQUIRED to be a land point if the site's + first guess nearest neigbour is a land point. If the first guess neighbour + is a sea point, the site is assumed to be a sea point as well the + neighbour point will not be changed. + + ''' + + method = 'min_dz_land_biased_below' + + def test_return_type(self): + '''Ensure a numpy array of the format expected is returned.''' + self.return_types(self.method) + + def test_without_ancillary_data(self): + ''' + Ensure an exception is raised if needed ancillary fields are + missing. + ''' + self.without_ancillary_data(self.method) + + def test_correct_neighbour_no_orography(self): + '''Nearest neighbouring grid point with no other conditions''' + self.correct_neighbour(self.method, 15, 10, 10.) + + def test_correct_neighbour_orography(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes below the site if these are available. + ''' + self.ancillary_data['orography'].data[14, 10] = 10. + self.correct_neighbour(self.method, 14, 10, 0.) + + def test_correct_neighbour_orography_equal_displacement(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes BELOW the site if these are available. 
+ + In this case of equal minimum vertical grid point displacements above + and below the site the code will select the point which obeys the bias + condition; here (14, 10) is BELOW the site and will be chosen instead + of (16, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 11. + self.correct_neighbour(self.method, 14, 10, 1.) + + def test_correct_neighbour_orography_unequal_displacement(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes BELOW the site if these are available. + + In this case the minimum vertical grid point displacement -1 at + (16, 10) goes against the selection bias of grid points BELOW the site. + As such the next nearest dz that fulfils the bias condition is chosen; + 2 at (14, 10). + ''' + self.ancillary_data['orography'].data[14, 10] = 8. + self.ancillary_data['orography'].data[16, 10] = 11. + self.correct_neighbour(self.method, 14, 10, 2.) + + def test_correct_neighbour_no_orography_land(self): + ''' + Sets nearest grid point to be a sea point. Assumes site is sea point + and leaves coordinates unchanged (dz should not vary over the sea). + + ''' + self.ancillary_data['land'].data[15, 10] = 0. + self.correct_neighbour(self.method, 15, 10, 10.) + + def test_correct_neighbour_orography_equal_displacement_land(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. Biased to prefer grid points with relative + altitudes BELOW the site if these are available. + + This test requires a land point be selected. Any grid point not above + land is discounted. So (14, 10) is disregarded leaving (16, 10) to give + the smallest dz. This point is returned as the neighbour as no points + fulfill the BELOW bias condition. + + ''' + self.ancillary_data['orography'].data = ( + self.ancillary_data['orography'].data + 20.) + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 11. + self.ancillary_data['land'].data[14, 10] = 0. + self.correct_neighbour(self.method, 16, 10, -1.) + + def test_correct_neighbour_orography_unequal_displacement_land(self): + ''' + Nearest neighbouring grid point condition relaxed to give smallest + vertical displacement. A land point is required. Biased to prefer + grid points with relative altitudes BELOW the site if these are + available. + + This test requires a land point be selected. Any grid point not above + land is discounted. So (14, 10) is disregarded leaving (16, 10) to give + the smallest dz. This point is returned as the neighbour as no points + fulfill the BELOW bias condition. + + ''' + self.ancillary_data['orography'].data = ( + self.ancillary_data['orography'].data + 20.) + self.ancillary_data['orography'].data[14, 10] = 9. + self.ancillary_data['orography'].data[16, 10] = 12. + self.ancillary_data['land'].data[14, 10] = 0. + self.correct_neighbour(self.method, 16, 10, -2.) + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/improver/spotdata/times.py b/lib/improver/spotdata/times.py new file mode 100644 index 0000000000..144fc0ff54 --- /dev/null +++ b/lib/improver/spotdata/times.py @@ -0,0 +1,74 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. 
+# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Plugins written for the Improver site specific process chain. + +""" + +from datetime import datetime as dt +from datetime import time +from datetime import timedelta + + +def get_forecast_times(forecast_date=None, forecast_time=None, + forecast_length=None): + ''' + Generate a list of python datetime objects specifying the desired forecast + times. This list will be created from input specifications if provided. + Otherwise defaults are to start today at the most recent 6-hourly interval + (00, 06, 12, 18) and to run out to T+144 hours. + + ''' + if forecast_date is not None: + start_date = dt.strptime(forecast_date, "%Y%m%d").date() + else: + start_date = dt.utcnow().date() + + if forecast_time is not None: + forecast_start_time = dt.combine(start_date, time(forecast_time)) + else: + # If no start hour provided, go back to the nearest multiple of 6 + # hours (e.g. utcnow = 11Z --> 06Z). + forecast_start_time = dt.combine( + start_date, time(divmod(dt.utcnow().hour, 6)[0]*6)) + + if forecast_length is None: + forecast_length = 144 + + # Generate forecast times. Hourly to T+48, 3 hourly to T+144. + forecast_times = [forecast_start_time + timedelta(hours=x) for x in + range(min(forecast_length, 49))] + forecast_times = (forecast_times + + [forecast_start_time + timedelta(hours=x) for x in + range(51, min(forecast_length, 144), 3)]) + + return forecast_times diff --git a/lib/improver/spotdata/write_output.py b/lib/improver/spotdata/write_output.py new file mode 100644 index 0000000000..6141d1c5f8 --- /dev/null +++ b/lib/improver/spotdata/write_output.py @@ -0,0 +1,50 @@ +""" +Plugins written for the Improver site specific process chain. + +""" + +import os +from iris import FUTURE + +FUTURE.netcdf_no_unlimited = True + + +class WriteOutput(object): + ''' Writes diagnostic cube data in a format determined by the method.''' + + def __init__(self, method): + ''' + Select the method (format) for writing out the data cubes. + + Args: + ----- + method : String that sets the method. 
+ + ''' + self.method = method + self.dir_path = os.path.dirname(os.path.realpath(__file__)) + + def process(self, cube): + '''Call the required method''' + function = getattr(self, self.method) + function(cube) + + def as_netcdf(self, cube, path=None): + ''' + Writes iris.cube.Cube data out to netCDF format files. + + Args: + ----- + cube : iris.cube.Cube diagnostic data cube. + path : Optional string setting the output path for the file. + + Returns: + -------- + Nil. Writes out file to filepath or working directory. + + ''' + from iris.fileformats.netcdf import Saver + if path is None: + path = self.dir_path + with Saver('{}/{}.nc'.format(path, cube.name()), 'NETCDF4') as output: + output.write(cube) From e59a19f179a863d27d018d263e8171e59f2a974a Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Tue, 23 May 2017 10:43:34 +0100 Subject: [PATCH 0032/1367] Added missing licensing header to these files. --- lib/improver/spotdata/configurations.py | 34 +++++++++++++++++++++--- lib/improver/spotdata/framework.py | 35 ++++++++++++++++++++++--- lib/improver/spotdata/write_output.py | 35 ++++++++++++++++++++++--- 3 files changed, 93 insertions(+), 11 deletions(-) diff --git a/lib/improver/spotdata/configurations.py b/lib/improver/spotdata/configurations.py index 5839fd6137..a2ea8e715a 100644 --- a/lib/improver/spotdata/configurations.py +++ b/lib/improver/spotdata/configurations.py @@ -1,7 +1,35 @@ -""" -Test data extraction configuration +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Test data extraction configuration""" -""" from os import environ as Environ DIAGNOSTIC_FILE_PATH = Environ.get('DIAGNOSTIC_FILE_PATH') diff --git a/lib/improver/spotdata/framework.py b/lib/improver/spotdata/framework.py index 9888f72da3..db4d682b54 100644 --- a/lib/improver/spotdata/framework.py +++ b/lib/improver/spotdata/framework.py @@ -1,7 +1,34 @@ -""" -The framework for site specific post-processing. 
- -""" +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""The framework for site specific post-processing.""" import argparse import multiprocessing as mp diff --git a/lib/improver/spotdata/write_output.py b/lib/improver/spotdata/write_output.py index 6141d1c5f8..64459c269c 100644 --- a/lib/improver/spotdata/write_output.py +++ b/lib/improver/spotdata/write_output.py @@ -1,7 +1,34 @@ -""" -Plugins written for the Improver site specific process chain. - -""" +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Plugins written for the Improver site specific process chain.""" import os from iris import FUTURE From d89d1dbd8783def4d952297a4af270839eef2ca7 Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Tue, 23 May 2017 13:57:05 +0100 Subject: [PATCH 0033/1367] Pylint changes to unit tests. --- lib/improver/spotdata/neighbour_finding.py | 2 +- .../spotdata/tests/test_extract_data.py | 114 ++++++++++-------- .../spotdata/tests/test_neighbour_finding.py | 4 +- 3 files changed, 64 insertions(+), 56 deletions(-) diff --git a/lib/improver/spotdata/neighbour_finding.py b/lib/improver/spotdata/neighbour_finding.py index d2b6808d33..5c2b73a6a9 100644 --- a/lib/improver/spotdata/neighbour_finding.py +++ b/lib/improver/spotdata/neighbour_finding.py @@ -33,7 +33,7 @@ import numpy as np import cartopy.crs as ccrs -from iris.analysis.trajectory import interpolate +# from iris.analysis.trajectory import interpolate from improver.spotdata.ancillaries import data_from_ancillary from improver.spotdata.common_functions import (ConditionalListExtract, nearest_n_neighbours, diff --git a/lib/improver/spotdata/tests/test_extract_data.py b/lib/improver/spotdata/tests/test_extract_data.py index 6e032caf85..64bd1e41b2 100644 --- a/lib/improver/spotdata/tests/test_extract_data.py +++ b/lib/improver/spotdata/tests/test_extract_data.py @@ -61,7 +61,7 @@ def setUp(self): """ Create a cube containing a regular lat-lon grid. - Data is formatted to increase linearly in x/y dimensions, + Data is formatted to increase linearly in x/y dimensions, e.g. 0 1 2 3 1 2 3 4 @@ -69,8 +69,8 @@ def setUp(self): 3 4 5 6 """ - data = np.arange(0,20,1) - for i in range(1,20): + data = np.arange(0, 20, 1) + for i in range(1, 20): data = np.append(data, np.arange(i, 20+i)) data.resize(1, 20, 20) @@ -82,9 +82,10 @@ def setUp(self): units='degrees', coord_system=GeogCS(6371229.0)) # Use time of 2017-02-17 06:00:00 - time = DimCoord([1487311200], standard_name='time', - units=cf_units.Unit('seconds since 1970-01-01 00:00:00', - calendar='gregorian')) + time = DimCoord( + [1487311200], standard_name='time', + units=cf_units.Unit('seconds since 1970-01-01 00:00:00', + calendar='gregorian')) time_dt = [dt(2017, 02, 17, 06, 00)] # time_extract = Constraint(time=PartialDateTime(2017, 02, 17, 06, 00)) @@ -99,7 +100,7 @@ def setUp(self): cubes = CubeList() cubes.append(cube) - orography = Cube(np.ones((20,20)), + orography = Cube(np.ones((20, 20)), long_name="surface_altitude", dim_coords_and_dims=[(latitude, 0), (longitude, 1)], @@ -119,42 +120,42 @@ def setUp(self): # Create additional vertical data used to calculate temperature lapse # rates from model levels. - Tlevel0 = np.ones((1, 20, 20))*20. - Tlevel1 = np.ones((1, 20, 20))*-20. - Tlevel2 = np.ones((1, 20, 20))*-60. - Tdata = np.vstack((Tlevel0, Tlevel1, Tlevel2)) - Tdata.resize((1, 3, 20, 20)) - - Plevel0 = np.ones((1, 20, 20))*1000. - Plevel1 = np.ones((1, 20, 20))*900. - Plevel2 = np.ones((1, 20, 20))*800. 
- Pdata = np.vstack((Plevel0, Plevel1, Plevel2)) - Pdata.resize((1, 3, 20, 20)) - + t_level0 = np.ones((1, 20, 20))*20. + t_level1 = np.ones((1, 20, 20))*-20. + t_level2 = np.ones((1, 20, 20))*-60. + t_data = np.vstack((t_level0, t_level1, t_level2)) + t_data.resize((1, 3, 20, 20)) + + p_level0 = np.ones((1, 20, 20))*1000. + p_level1 = np.ones((1, 20, 20))*900. + p_level2 = np.ones((1, 20, 20))*800. + p_data = np.vstack((p_level0, p_level1, p_level2)) + p_data.resize((1, 3, 20, 20)) + height = DimCoord([0., 50., 100.], standard_name='height', units='m') temperature_on_height_levels = CubeList() temperature_on_height_levels.append( Cube( - Tdata, + t_data, long_name="temperature_on_height_levels", - dim_coords_and_dims=[(time, 0), (height,1), + dim_coords_and_dims=[(time, 0), (height, 1), (latitude, 2), (longitude, 3)], units="degree_Celsius")) pressure_on_height_levels = CubeList() pressure_on_height_levels.append( Cube( - Pdata, + p_data, long_name="pressure_on_height_levels", - dim_coords_and_dims=[(time, 0), (height,1), + dim_coords_and_dims=[(time, 0), (height, 1), (latitude, 2), (longitude, 3)], units="hPa")) surface_pressure = CubeList() surface_pressure.append( Cube( - Pdata[0,0].reshape(1, 20, 20), + p_data[0, 0].reshape(1, 20, 20), long_name="surface_pressure", dim_coords_and_dims=[(time, 0), (latitude, 1), (longitude, 2)], units="hPa")) @@ -164,11 +165,12 @@ def setUp(self): 'surface_pressure': surface_pressure} sites = OrderedDict() - sites.update({'100': {'latitude': 4.74, - 'longitude': 9.47, - 'altitude': 10, - 'gmtoffset': 0 - } + sites.update({'100': + {'latitude': 4.74, + 'longitude': 9.47, + 'altitude': 10, + 'gmtoffset': 0 + } }) neighbour_list = np.empty(1, dtype=[('i', 'i8'), @@ -190,7 +192,7 @@ def return_type(self, method, additional_data, **kwargs): """Test that the plugin returns an iris.cube.CubeList.""" plugin = ExtractData(method) result = plugin.process(self.cubes, self.sites, self.neighbour_list, - self.time_dt, additional_data, **kwargs) + self.time_dt, additional_data, **kwargs) self.assertIsInstance(result, CubeList) @@ -198,7 +200,7 @@ def extracted_value(self, method, additional_data, expected, **kwargs): """Test that the plugin returns the correct value.""" plugin = ExtractData(method) result = plugin.process(self.cubes, self.sites, self.neighbour_list, - self.time_dt, additional_data, **kwargs) + self.time_dt, additional_data, **kwargs) self.assertAlmostEqual(result[0].data, expected) def different_projection(self, method, additional_data, expected, **kwargs): @@ -219,9 +221,9 @@ def different_projection(self, method, additional_data, expected, **kwargs): y.append(y_trg) new_x = AuxCoord(x, standard_name='projection_x_coordinate', - units='m', coord_system=trg_crs_iris) + units='m', coord_system=trg_crs_iris) new_y = AuxCoord(y, standard_name='projection_y_coordinate', - units='m', coord_system=trg_crs_iris) + units='m', coord_system=trg_crs_iris) cube = Cube(self.cubes[0].data, long_name="test_data", @@ -234,7 +236,7 @@ def different_projection(self, method, additional_data, expected, **kwargs): plugin = ExtractData(method) result = plugin.process(cubes, self.sites, self.neighbour_list, - self.time_dt, additional_data, **kwargs) + self.time_dt, additional_data, **kwargs) self.assertEqual(cubes[0].coord_system(), trg_crs_iris) self.assertAlmostEqual(result[0].data, expected) @@ -242,7 +244,6 @@ def different_projection(self, method, additional_data, expected, **kwargs): self.assertEqual(result[0].coord(axis='x').name(), 'longitude') 
self.assertAlmostEqual(result[0].coord(axis='y').points, 4.74) self.assertAlmostEqual(result[0].coord(axis='x').points, 9.47) - def missing_ancillary_data(self, method, additional_data, **kwargs): """Test that the plugin copes with missing ancillary data.""" @@ -269,16 +270,17 @@ class use_nearest(TestExtractData): def test_return_type(self): self.return_type(self.method, None, ancillary_data=None) - + def test_extracted_value(self): """Test that the plugin returns the correct value.""" - expected = 20 + expected = 20 self.extracted_value(self.method, None, expected, ancillary_data=None) def test_different_projection(self): """Test that the plugin copes with non-lat/lon grids.""" - expected=20. - self.different_projection(self.method, None, expected, ancillary_data=None) + expected = 20. + self.different_projection(self.method, None, expected, + ancillary_data=None) class orography_derived_temperature_lapse_rate(TestExtractData): @@ -287,7 +289,7 @@ class orography_derived_temperature_lapse_rate(TestExtractData): def test_return_type(self): self.return_type(self.method, None, ancillary_data=self.ancillary_data) - + def test_extracted_value(self): """ Test that the plugin returns the correct value. @@ -296,13 +298,14 @@ def test_extracted_value(self): Site defined with has altitude=10, so T+expected = 20.5. """ - expected = 20.5 - self.extracted_value(self.method, None, expected, ancillary_data=self.ancillary_data) + expected = 20.5 + self.extracted_value(self.method, None, expected, + ancillary_data=self.ancillary_data) def test_different_projection(self): """ Test that the plugin copes with non-lat/lon grids. - + Cube is transformed into a LambertConformal projection. The usual latitude/longitude coordinates are used to query the grid, with iris functionality used to convert the query coordinates to the correct @@ -314,8 +317,9 @@ def test_different_projection(self): case above. """ - expected=20.5 - self.different_projection(self.method, None, expected, ancillary_data=self.ancillary_data) + expected = 20.5 + self.different_projection(self.method, None, expected, + ancillary_data=self.ancillary_data) def test_missing_ancillary_data(self): self.missing_ancillary_data(self.method, None, ancillary_data=None) @@ -326,8 +330,9 @@ class model_level_temperature_lapse_rate(TestExtractData): method = 'model_level_temperature_lapse_rate' def test_return_type(self): - self.return_type(self.method, self.ad, ancillary_data=self.ancillary_data) - + self.return_type(self.method, self.ad, + ancillary_data=self.ancillary_data) + def test_extracted_value(self): """ Test that the plugin returns the correct value. @@ -339,13 +344,14 @@ def test_extracted_value(self): """ self.sites['100']['altitude'] = 60. self.neighbour_list['dz'] = 50. - expected = -20. - self.extracted_value(self.method, self.ad, expected, ancillary_data=self.ancillary_data) + expected = -20. + self.extracted_value(self.method, self.ad, expected, + ancillary_data=self.ancillary_data) def test_different_projection(self): """ Test that the plugin copes with non-lat/lon grids. - + Cube is transformed into a LambertConformal projection. The usual latitude/longitude coordinates are used to query the grid, with iris functionality used to convert the query coordinates to the correct @@ -359,13 +365,15 @@ def test_different_projection(self): """ self.sites['100']['altitude'] = 60. self.neighbour_list['dz'] = 50. - expected = -20. + expected = -20. 
plugin = ExtractData(self.method) - self.different_projection(self.method, self.ad, expected, ancillary_data=self.ancillary_data) + self.different_projection(self.method, self.ad, expected, + ancillary_data=self.ancillary_data) def test_missing_additional_data(self): plugin = ExtractData(self.method) - self.missing_additional_data(self.method, None, ancillary_data=self.ancillary_data) + self.missing_additional_data(self.method, None, + ancillary_data=self.ancillary_data) if __name__ == '__main__': diff --git a/lib/improver/spotdata/tests/test_neighbour_finding.py b/lib/improver/spotdata/tests/test_neighbour_finding.py index c8abcbe84e..42e81bc266 100644 --- a/lib/improver/spotdata/tests/test_neighbour_finding.py +++ b/lib/improver/spotdata/tests/test_neighbour_finding.py @@ -93,7 +93,7 @@ def return_types(self, method): """Test that the plugin returns a numpy array.""" plugin = PointSelection(method) result = plugin.process(self.cube, self.sites, - ancillary_data=self.ancillary_data) + ancillary_data=self.ancillary_data) self.assertIsInstance(result, np.ndarray) self.assertEqual(result.dtype, self.neighbour_list.dtype) @@ -101,7 +101,7 @@ def correct_neighbour(self, method, i_expected, j_expected, dz_expected): """Test that the plugin returns the expected neighbour""" plugin = PointSelection(method) result = plugin.process(self.cube, self.sites, - ancillary_data=self.ancillary_data) + ancillary_data=self.ancillary_data) self.assertEqual(result['i'], i_expected) self.assertEqual(result['j'], j_expected) self.assertEqual(result['dz'], dz_expected) From b81e1f02a602c744dd33be1f7fa705969ffe7dbd Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Tue, 23 May 2017 14:08:47 +0100 Subject: [PATCH 0034/1367] Codacy bot complaints remedied. --- lib/improver/spotdata/tests/test_extract_data.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/lib/improver/spotdata/tests/test_extract_data.py b/lib/improver/spotdata/tests/test_extract_data.py index 64bd1e41b2..7f5344768f 100644 --- a/lib/improver/spotdata/tests/test_extract_data.py +++ b/lib/improver/spotdata/tests/test_extract_data.py @@ -41,9 +41,7 @@ from iris.coord_systems import GeogCS from iris.cube import (Cube, CubeList) -from iris import Constraint from iris.tests import IrisTest -from iris.time import PartialDateTime import cartopy.crs as ccrs from collections import OrderedDict from iris import FUTURE @@ -88,7 +86,6 @@ def setUp(self): calendar='gregorian')) time_dt = [dt(2017, 02, 17, 06, 00)] - # time_extract = Constraint(time=PartialDateTime(2017, 02, 17, 06, 00)) cube = Cube(data, long_name="test_data", @@ -250,7 +247,7 @@ def missing_ancillary_data(self, method, additional_data, **kwargs): plugin = ExtractData(method) msg = "Ancillary data" with self.assertRaisesRegexp(Exception, msg): - result = plugin.process( + plugin.process( self.cubes, self.sites, self.neighbour_list, self.time_dt, additional_data, **kwargs) @@ -259,7 +256,7 @@ def missing_additional_data(self, method, additional_data, **kwargs): plugin = ExtractData(method) msg = "Required additional data is unset" with self.assertRaisesRegexp(Exception, msg): - result = plugin.process( + plugin.process( self.cubes, self.sites, self.neighbour_list, self.time_dt, additional_data, **kwargs) @@ -366,12 +363,15 @@ def test_different_projection(self): self.sites['100']['altitude'] = 60. self.neighbour_list['dz'] = 50. expected = -20. 
- plugin = ExtractData(self.method) self.different_projection(self.method, self.ad, expected, ancillary_data=self.ancillary_data) def test_missing_additional_data(self): - plugin = ExtractData(self.method) + """ + Test for appropriate error message when required additional + diagnostics are unavailable. + + """ self.missing_additional_data(self.method, None, ancillary_data=self.ancillary_data) From 3f29344e98cd531c10427a417ace8e8e76e3e5ca Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Tue, 23 May 2017 15:32:40 +0100 Subject: [PATCH 0035/1367] Changed file path setting to be a command line argument. --- .gitignore | 3 +++ lib/improver/spotdata/ancillaries.py | 9 +++---- lib/improver/spotdata/configurations.py | 35 +++++++++++++------------ lib/improver/spotdata/framework.py | 35 +++++++++++++++++++------ 4 files changed, 51 insertions(+), 31 deletions(-) diff --git a/.gitignore b/.gitignore index fc9ec3a569..c09bb4c49b 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,6 @@ # Site-specific setup etc/site-init + +# Output data +*.nc diff --git a/lib/improver/spotdata/ancillaries.py b/lib/improver/spotdata/ancillaries.py index f26cfe4e52..79de822353 100644 --- a/lib/improver/spotdata/ancillaries.py +++ b/lib/improver/spotdata/ancillaries.py @@ -35,11 +35,10 @@ """ -from os import environ as Environ from improver.spotdata.read_input import Load -def get_ancillary_data(diagnostics): +def get_ancillary_data(diagnostics, ancillary_path): ''' Takes in a list of desired diagnostics and determines which ancillary (i.e. non-time dependent) fields are required given their neighbour @@ -58,12 +57,10 @@ def get_ancillary_data(diagnostics): name and the item is the iris.cube.Cube of data. ''' - ANCILLARY_PATH = Environ.get('ANCILLARY_PATH') - ancillary_data = {} orography = Load('single_file').process( - ANCILLARY_PATH + '/orography.nc', 'surface_altitude') + ancillary_path + '/orography.nc', 'surface_altitude') ancillary_data.update({'orography': orography}) @@ -72,7 +69,7 @@ def get_ancillary_data(diagnostics): for key in diagnostics.keys()]): land = Load('single_file').process( - ANCILLARY_PATH + '/land_mask.nc', 'land_binary_mask') + ancillary_path + '/land_mask.nc', 'land_binary_mask') ancillary_data.update({'land': land}) diff --git a/lib/improver/spotdata/configurations.py b/lib/improver/spotdata/configurations.py index a2ea8e715a..787e8c8b17 100644 --- a/lib/improver/spotdata/configurations.py +++ b/lib/improver/spotdata/configurations.py @@ -30,11 +30,8 @@ # POSSIBILITY OF SUCH DAMAGE. """Test data extraction configuration""" -from os import environ as Environ -DIAGNOSTIC_FILE_PATH = Environ.get('DIAGNOSTIC_FILE_PATH') - -def all_diagnostics(): +def all_diagnostics(diagnostic_data_path): ''' Defines how all available diagnostics should be processed. A custom name used to key the returned dictionary allows for multiple variations on the @@ -49,10 +46,14 @@ def all_diagnostics(): use_nearest data extraction to simply take the value from that neighbouring point. + Args: + ----- + diagnostic_data_path : A string containing the file path of the diagnostic + data to be read in. 
''' diagnostic_recipes = { 'temperature': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'temperature_at_screen_level' + '*'), 'diagnostic_name': 'air_temperature', 'neighbour_finding': 'default', @@ -60,7 +61,7 @@ def all_diagnostics(): 'extrema': True }, 'temperature_orog': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'temperature_at_screen_level' + '*'), 'diagnostic_name': 'air_temperature', 'neighbour_finding': 'default', @@ -68,7 +69,7 @@ def all_diagnostics(): 'extrema': True }, 'wind_speed': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'horizontal_wind_speed_and_direction_at_10m' + '*'), 'diagnostic_name': 'wind_speed', 'neighbour_finding': 'default', @@ -76,7 +77,7 @@ def all_diagnostics(): 'extrema': False }, 'wind_direction': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'horizontal_wind_speed_and_direction_at_10m' + '*'), 'diagnostic_name': 'wind_from_direction', 'neighbour_finding': 'default', @@ -84,7 +85,7 @@ def all_diagnostics(): 'extrema': False }, 'visibility': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'visibility_at_screen_level' + '*'), 'diagnostic_name': 'visibility_in_air', 'neighbour_finding': 'default', @@ -92,7 +93,7 @@ def all_diagnostics(): 'extrema': True }, 'relative_humidity': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'relative_humidity_at_screen_level' + '*'), 'diagnostic_name': 'relative_humidity', 'neighbour_finding': 'default', @@ -100,7 +101,7 @@ def all_diagnostics(): 'extrema': False }, 'surface_pressure': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'surface_pressure' + '*'), 'diagnostic_name': 'surface_air_pressure', 'neighbour_finding': 'default', @@ -108,7 +109,7 @@ def all_diagnostics(): 'extrema': False }, 'low_cloud_amount': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'low_cloud_amount' + '*'), 'diagnostic_name': 'low_type_cloud_area_fraction', 'neighbour_finding': 'default', @@ -116,7 +117,7 @@ def all_diagnostics(): 'extrema': False }, 'medium_cloud_amount': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'medium_cloud_amount' + '*'), 'diagnostic_name': 'medium_type_cloud_area_fraction', 'neighbour_finding': 'default', @@ -124,7 +125,7 @@ def all_diagnostics(): 'extrema': False }, 'high_cloud_amount': { - 'filepath': (DIAGNOSTIC_FILE_PATH + '/*/*' + + 'filepath': (diagnostic_data_path + '/*/*' + 'high_cloud_amount' + '*'), 'diagnostic_name': 'high_type_cloud_area_fraction', 'neighbour_finding': 'default', @@ -132,7 +133,7 @@ def all_diagnostics(): 'extrema': False }, 'total_cloud_amount': { - 'filepath': DIAGNOSTIC_FILE_PATH + '/*/*' + 'total_cloud' + '*', + 'filepath': diagnostic_data_path + '/*/*' + 'total_cloud' + '*', 'diagnostic_name': 'cloud_area_fraction', 'neighbour_finding': 'default', 'interpolation_method': 'use_nearest', @@ -142,7 +143,7 @@ def all_diagnostics(): return diagnostic_recipes -def define_diagnostics(configuration): +def define_diagnostics(configuration, data_path): ''' Define the configurations with which spotdata may be run. 
These configurations specify which diagnostic definitions to include @@ -162,7 +163,7 @@ def define_diagnostics(configuration): of how to process them. ''' - diagnostics = all_diagnostics() + diagnostics = all_diagnostics(data_path) configuration_dict = { 'pws_default': diff --git a/lib/improver/spotdata/framework.py b/lib/improver/spotdata/framework.py index db4d682b54..1ab0bcfd66 100644 --- a/lib/improver/spotdata/framework.py +++ b/lib/improver/spotdata/framework.py @@ -32,7 +32,6 @@ import argparse import multiprocessing as mp -from os import environ as Environ from improver.spotdata.read_input import Load from improver.spotdata.neighbour_finding import PointSelection @@ -46,7 +45,8 @@ from improver.spotdata.configurations import define_diagnostics -def run_framework(config_name, latitudes=None, longitudes=None, +def run_framework(config_name, data_path, ancillary_path, site_path=None, + latitudes=None, longitudes=None, altitudes=None, site_ids=None, forecast_date=None, forecast_time=None, forecast_length=None, use_multiprocessing=False): @@ -63,6 +63,11 @@ def run_framework(config_name, latitudes=None, longitudes=None, to run the spotdata system. e.g. pws_default which will produce the required diagnostics for this product. + data_path : String giving path to diagnostic data files. + ancillary_path : String giving path to ancillary data files. + site_path : String giving path to site data file if in use. If + no lats/lons are specified at the command line, this + file path is needed. latitudes : A list of latitudes for running on the fly for a custom set of sites. The order should correspond to the subsequent latitudes and altitudes variables @@ -112,17 +117,21 @@ def run_framework(config_name, latitudes=None, longitudes=None, # Clumsy implementation of grabbing the BD pickle file if no sites are # specified. if latitudes is None or longitudes is None: - site_path = Environ.get('SITE_PATH') - site_path = (site_path + '/bestdata2_locsDB.pkl') - sites = ImportSiteData('pickle_file').process(site_path) + if site_path is None: + raise Exception('Site path required to get site data if no sites ' + 'are specified at runtime.') + else: + site_path = (site_path + '/bestdata2_locsDB.pkl') + sites = ImportSiteData('pickle_file').process(site_path) # Use the selected config to estabilish which diagnostics are required. # Also gets the default method of selecting grid point neighbours for the # given configuration. - neighbour_finding_default, diagnostics = define_diagnostics(config_name) + neighbour_finding_default, diagnostics = define_diagnostics(config_name, + data_path) # Load ancillary data files; fields that don't vary in time. - ancillary_data = get_ancillary_data(diagnostics) + ancillary_data = get_ancillary_data(diagnostics, ancillary_path) # Construct a set of neighbour_finding methods to be used in this run. neighbour_schemes = list( @@ -230,6 +239,15 @@ def process_diagnostic(diagnostic, neighbours, sites, forecast_times, help='Configuration to use, defining which diagnostics' ' to produce.' ) + parser.add_argument('data_path', type=str, + help='Path to diagnostic data files.' + ) + parser.add_argument('ancillary_path', type=str, + help='Path to ancillary (time invariant) data files.' + ) + parser.add_argument('--site_path', type=str, + help='Path to site data file.' + ) parser.add_argument('--latitudes', type=int, choices=range(-90, 90), nargs='+', help='Latitude of site of interest.' 
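With the path arguments in place, a direct call to run_framework might look like the sketch below. Every path and site value is a placeholder, and only keyword names from the signature above are used; the forecast arguments fall back to their defaults.

```python
from improver.spotdata.framework import run_framework

# Placeholders throughout; a custom site is defined on the fly rather than
# being read from a site file.
run_framework('pws_default',
              '/path/to/diagnostic/data',
              '/path/to/ancillary/data',
              site_path='/path/to/site/data',
              latitudes=[50.], longitudes=[0.], altitudes=[10.])
```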
@@ -261,7 +279,8 @@ def process_diagnostic(diagnostic, neighbours, sites, forecast_times, args = parser.parse_args() - run_framework(args.config_name, latitudes=args.latitudes, + run_framework(args.config_name, args.data_path, args.ancillary_path, + site_path=args.site_path, latitudes=args.latitudes, longitudes=args.longitudes, altitudes=args.altitudes, site_ids=args.site_ids, forecast_date=args.start_date, forecast_time=args.start_time, From 473c062c7ce0b26129c3504b93a175c6dcd10d6b Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Tue, 23 May 2017 16:26:44 +0100 Subject: [PATCH 0036/1367] Moved filepaths to be command line arguments. Required interface changes as well. --- lib/improver/spotdata/configurations.py | 1 + lib/improver/spotdata/extract_data.py | 7 +++++-- lib/improver/spotdata/framework.py | 6 +++--- lib/improver/spotdata/read_input.py | 6 +++--- .../spotdata/tests/test_extract_data.py | 20 ++++++++++--------- 5 files changed, 23 insertions(+), 17 deletions(-) diff --git a/lib/improver/spotdata/configurations.py b/lib/improver/spotdata/configurations.py index 787e8c8b17..307ebed88f 100644 --- a/lib/improver/spotdata/configurations.py +++ b/lib/improver/spotdata/configurations.py @@ -57,6 +57,7 @@ def all_diagnostics(diagnostic_data_path): 'temperature_at_screen_level' + '*'), 'diagnostic_name': 'air_temperature', 'neighbour_finding': 'default', + # 'interpolation_method': 'model_level_temperature_lapse_rate', 'interpolation_method': 'use_nearest', 'extrema': True }, diff --git a/lib/improver/spotdata/extract_data.py b/lib/improver/spotdata/extract_data.py index 7c103dfefe..0eb249b7a2 100644 --- a/lib/improver/spotdata/extract_data.py +++ b/lib/improver/spotdata/extract_data.py @@ -307,7 +307,7 @@ def model_level_temperature_lapse_rate(self, cube, sites, neighbours, return self.make_cube(cube, data, sites) -def get_method_prerequisites(method): +def get_method_prerequisites(method, diagnostic_data_path): ''' Determine which additional diagnostics are required for a given method of data extraction. @@ -332,5 +332,8 @@ def get_method_prerequisites(method): ad = {} for item in additional_diagnostics: - ad.update({item: get_additional_diagnostics(item)}) + ad.update({item: + get_additional_diagnostics( + item, diagnostic_data_path) + }) return ad diff --git a/lib/improver/spotdata/framework.py b/lib/improver/spotdata/framework.py index 1ab0bcfd66..a7595e9d57 100644 --- a/lib/improver/spotdata/framework.py +++ b/lib/improver/spotdata/framework.py @@ -185,11 +185,11 @@ def run_framework(config_name, data_path, ancillary_path, site_path=None, for key in diagnostics.keys(): diagnostic = diagnostics[key] process_diagnostic(diagnostic, neighbours, sites, forecast_times, - ancillary_data) + data_path, ancillary_data) def process_diagnostic(diagnostic, neighbours, sites, forecast_times, - ancillary_data): + data_path, ancillary_data): ''' Extract data and write output for a given diagnostic. 
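Threading data_path through these signatures follows a single pattern: each extraction method declares the additional diagnostics it needs, which are then loaded from the supplied path. A rough sketch of that pattern is below; the function name and the loader callable are assumptions standing in for get_additional_diagnostics, though the diagnostic names are those used elsewhere in this series.

```python
# Rough sketch only, not the get_method_prerequisites implementation.
PREREQUISITES = {
    'model_level_temperature_lapse_rate': [
        'temperature_on_height_levels',
        'pressure_on_height_levels',
        'surface_pressure'],
}

def method_prerequisites(method, diagnostic_data_path, loader):
    """Load any additional diagnostics the chosen method requires."""
    required = PREREQUISITES.get(method, [])
    return {name: loader(name, diagnostic_data_path) for name in required}
```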
@@ -218,7 +218,7 @@ def process_diagnostic(diagnostic, neighbours, sites, forecast_times, neighbour_list = neighbours[diagnostic['neighbour_finding']] additional_data = get_method_prerequisites( - diagnostic['interpolation_method']) + diagnostic['interpolation_method'], data_path) cubes_out = ExtractData( diagnostic['interpolation_method'] diff --git a/lib/improver/spotdata/read_input.py b/lib/improver/spotdata/read_input.py index e5412fc877..80150eb6e1 100644 --- a/lib/improver/spotdata/read_input.py +++ b/lib/improver/spotdata/read_input.py @@ -78,7 +78,8 @@ def multi_file(filepath, diagnostic): return load(filepath, diagnostic) -def get_additional_diagnostics(diagnostic_name, time_extract=None): +def get_additional_diagnostics(diagnostic_name, diagnostic_data_path, + time_extract=None): """ Load additional diagnostics needed for particular spot data processes. @@ -95,10 +96,9 @@ def get_additional_diagnostics(diagnostic_name, time_extract=None): data, with a single entry is time_extract is provided. """ - from improver.spotdata.configurations import diagnostic_file_path with FUTURE.context(cell_datetime_objects=True): cubes = Load('multi_file').process( - diagnostic_file_path + '/*/*' + diagnostic_name + '*', + diagnostic_data_path + '/*/*' + diagnostic_name + '*', None) if time_extract is not None: cube = cubes.extract(time_extract) diff --git a/lib/improver/spotdata/tests/test_extract_data.py b/lib/improver/spotdata/tests/test_extract_data.py index 7f5344768f..61aa993d16 100644 --- a/lib/improver/spotdata/tests/test_extract_data.py +++ b/lib/improver/spotdata/tests/test_extract_data.py @@ -51,6 +51,7 @@ FUTURE.cell_datetime_objects = True + class TestExtractData(IrisTest): """Test the extract data plugin.""" @@ -162,13 +163,14 @@ def setUp(self): 'surface_pressure': surface_pressure} sites = OrderedDict() - sites.update({'100': - {'latitude': 4.74, - 'longitude': 9.47, - 'altitude': 10, - 'gmtoffset': 0 - } - }) + sites.update( + {'100': + {'latitude': 4.74, + 'longitude': 9.47, + 'altitude': 10, + 'gmtoffset': 0 + } + }) neighbour_list = np.empty(1, dtype=[('i', 'i8'), ('j', 'i8'), @@ -184,7 +186,6 @@ def setUp(self): self.neighbour_list = neighbour_list self.time_dt = time_dt - def return_type(self, method, additional_data, **kwargs): """Test that the plugin returns an iris.cube.CubeList.""" plugin = ExtractData(method) @@ -200,7 +201,8 @@ def extracted_value(self, method, additional_data, expected, **kwargs): self.time_dt, additional_data, **kwargs) self.assertAlmostEqual(result[0].data, expected) - def different_projection(self, method, additional_data, expected, **kwargs): + def different_projection(self, method, additional_data, expected, + **kwargs): """Test that the plugin copes with non-lat/lon grids.""" trg_crs = None From 8867c99632c27aff072851e865c02224fa88f953 Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Tue, 23 May 2017 16:34:25 +0100 Subject: [PATCH 0037/1367] Removed blank line in constants.py which made pep8 complain. --- lib/improver/constants.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/improver/constants.py b/lib/improver/constants.py index 2245304b5e..1a454a1c91 100644 --- a/lib/improver/constants.py +++ b/lib/improver/constants.py @@ -38,5 +38,3 @@ # Specific heat capacity of dry air (J K-1 kg-1) CP_DRY_AIR = 1005.0 - - From 31ebd52ab4c465c3e14f3f4a6dd574e1cdff3283 Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Wed, 24 May 2017 14:19:39 +0100 Subject: [PATCH 0038/1367] Moved calling of framework to CLI in expected format. 
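With this change the argument parsing lives in bin/improver-spotdata, but the
framework can still be driven directly from Python. A minimal sketch, with
hypothetical placeholder paths rather than values taken from this patch:

from improver.spotdata.framework import run_framework

run_framework('short_test', '/path/to/diagnostic/data',
              '/path/to/ancillaries',
              latitudes=[51.5], longitudes=[-0.1], altitudes=[10.0],
              forecast_date='20170523', forecast_time=6,
              forecast_length=24)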
--- bin/improver-spotdata | 115 +++++++++++++++++++++++++++++ lib/improver/spotdata/framework.py | 55 -------------- 2 files changed, 115 insertions(+), 55 deletions(-) create mode 100755 bin/improver-spotdata diff --git a/bin/improver-spotdata b/bin/improver-spotdata new file mode 100755 index 0000000000..a5378942b5 --- /dev/null +++ b/bin/improver-spotdata @@ -0,0 +1,115 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +"""Script to spotdata extraction.""" +import argparse + +import iris + +from improver.spotdata.framework import run_framework + +def valid_latitude(value): + value = float(value) + if value < -90 or value > 90: + raise argparse.ArgumentTypeError("{} not in range [-90,90]".format(value)) + return value + +def valid_longitude(value): + value = float(value) + if value < -180 or value > 180: + raise argparse.ArgumentTypeError("{} not in range [-180,180]".format(value)) + return value + + +def main(): + """Load in arguments and start spotdata process.""" + parser = argparse.ArgumentParser( + description='SpotData : A configurable tool to extract spot-data ' + 'from gridded diagnostics. The method of interpolating ' + 'and adjusting the resulting data can be set by choosing ' + 'a suitable configuration.') + + parser.add_argument('config_name', + help='SpotData configuration to use, defining which ' + 'diagnostics to produce and how to interpolate ' + 'and tweak the data.' + ) + parser.add_argument('data_path', type=str, + help='Path to diagnostic data files.' + ) + parser.add_argument('ancillary_path', type=str, + help='Path to ancillary (time invariant) data files.' + ) + parser.add_argument('--site_path', type=str, + help='Path to site data file if this is being used ' + 'to choose sites.' + ) + parser.add_argument('--latitudes', type=valid_latitude, metavar='(-90,90)', + nargs='+', + help='List of latitudes of sites of interest.' 
+ ) + parser.add_argument('--longitudes', type=valid_longitude, metavar='(-180,180)', + nargs='+', + help='List of longitudes of sites of interest.' + ) + parser.add_argument('--altitudes', type=float, nargs='+', + help='Altitudes of sites of interest.' + ) + parser.add_argument('--site_ids', type=float, nargs='+', + help='ID numbers for sites can be set if desired.' + ) + parser.add_argument('--start_date', type=str, + help='Start date of forecast in format YYYYMMDD ' + '(e.g. 20170327 = 27th March 2017).' + ) + parser.add_argument('--start_time', type=int, + help='Starting hour of forecast in 24hr clock. ' + '(e.g. 3 = 03Z, 14 = 14Z).' + ) + parser.add_argument('--length', type=int, + help='Length of forecast in hours.' + ) + parser.add_argument('--multiprocess', type=bool, + help='Process diagnostics using multiprocessing.' + ) + + args = parser.parse_args() + + run_framework(args.config_name, args.data_path, args.ancillary_path, + site_path=args.site_path, latitudes=args.latitudes, + longitudes=args.longitudes, altitudes=args.altitudes, + site_ids=args.site_ids, + forecast_date=args.start_date, forecast_time=args.start_time, + forecast_length=args.length, + use_multiprocessing=args.multiprocess) + +if __name__ == "__main__": + main() diff --git a/lib/improver/spotdata/framework.py b/lib/improver/spotdata/framework.py index a7595e9d57..8a3731b8f2 100644 --- a/lib/improver/spotdata/framework.py +++ b/lib/improver/spotdata/framework.py @@ -231,58 +231,3 @@ def process_diagnostic(diagnostic, neighbours, sites, forecast_times, ExtractExtrema('In24hr').process(cube_out) WriteOutput('as_netcdf').process(cube_out) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='SSPS.') - parser.add_argument('config_name', - help='Configuration to use, defining which diagnostics' - ' to produce.' - ) - parser.add_argument('data_path', type=str, - help='Path to diagnostic data files.' - ) - parser.add_argument('ancillary_path', type=str, - help='Path to ancillary (time invariant) data files.' - ) - parser.add_argument('--site_path', type=str, - help='Path to site data file.' - ) - parser.add_argument('--latitudes', type=int, choices=range(-90, 90), - nargs='+', - help='Latitude of site of interest.' - ) - parser.add_argument('--longitudes', type=int, choices=range(-180, 180), - nargs='+', - help='Longitude of site of interest.' - ) - parser.add_argument('--altitudes', type=float, nargs='+', - help='Altitude of site of interest.' - ) - parser.add_argument('--site_ids', type=float, nargs='+', - help='ID no. for sites can be set if desired.' - ) - parser.add_argument('--start_date', type=str, - help='Start date of forecast in format YYYYMMDD ' - '(e.g. 20170327 = 27th March 2017).' - ) - parser.add_argument('--start_time', type=int, - help='Starting hour in 24hr clock of forecast. ' - '(e.g. 3 = 03Z, 14 = 14Z).' - ) - parser.add_argument('--length', type=int, - help='Length of forecast in hours.' - ) - parser.add_argument('--multiprocess', type=bool, - help='Process diagnostics using multiprocessing.' 
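# Caveat (reviewer note, not part of this patch): argparse's type=bool does
# not parse booleans; bool() of any non-empty string is True, so
# '--multiprocess False' would still enable multiprocessing. A runnable
# sketch of the usual flag idiom, should this option be revisited:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--multiprocess', action='store_true',
                    help='Process diagnostics using multiprocessing.')
print(parser.parse_args(['--multiprocess']).multiprocess)  # True
print(parser.parse_args([]).multiprocess)                  # False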
-                        )
-
-    args = parser.parse_args()
-
-    run_framework(args.config_name, args.data_path, args.ancillary_path,
-                  site_path=args.site_path, latitudes=args.latitudes,
-                  longitudes=args.longitudes, altitudes=args.altitudes,
-                  site_ids=args.site_ids,
-                  forecast_date=args.start_date, forecast_time=args.start_time,
-                  forecast_length=args.length,
-                  use_multiprocessing=args.multiprocess)

From f41e87b843228ca38d3bbbfc83b4e9d8c6b1f5d3 Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Wed, 24 May 2017 16:08:29 +0100
Subject: [PATCH 0039/1367] Minor edits following review.

---
 lib/improver/wind_downscaling.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/improver/wind_downscaling.py b/lib/improver/wind_downscaling.py
index fd1f15ee4a..a364fe3252 100644
--- a/lib/improver/wind_downscaling.py
+++ b/lib/improver/wind_downscaling.py
@@ -842,10 +842,10 @@ def find_coord_order(self, mcube):
         """
         coord_names = [self.x_name, self.y_name, self.z_name, self.t_name]
-        positions = [np.nan, np.nan, np.nan, np.nan]
+        positions = len(coord_names) * [np.nan]
         for coord_index, coord_name in enumerate(coord_names):
             if mcube.coords(coord_name, dim_coords=True):
-                positions[coord_index] = mcube.coord_dims(coord_name)[0]
+                positions[coord_index], = mcube.coord_dims(coord_name)
         return positions

     def find_heightgrid(self, wind):

From bd8d5a77f9d6dedbaef7b4f6a92873e66c3833e1 Mon Sep 17 00:00:00 2001
From: "benjamin.ayliffe"
Date: Thu, 25 May 2017 09:19:23 +0100
Subject: [PATCH 0040/1367] Removed everything but neighbour finding and
 dependencies.

---
 lib/improver/spotdata/configurations.py           | 186 ---------
 lib/improver/spotdata/extract_data.py             | 339 ----------------
 lib/improver/spotdata/extrema.py                  | 112 -----
 lib/improver/spotdata/framework.py                | 233 -----------
 lib/improver/spotdata/read_input.py               | 107 -----
 lib/improver/spotdata/site_data.py                | 174 --------
 .../spotdata/tests/test_extract_data.py           | 382 ------------------
 lib/improver/spotdata/times.py                    |  74 ----
 lib/improver/spotdata/write_output.py             |  77 ----
 9 files changed, 1684 deletions(-)
 delete mode 100644 lib/improver/spotdata/configurations.py
 delete mode 100644 lib/improver/spotdata/extract_data.py
 delete mode 100644 lib/improver/spotdata/extrema.py
 delete mode 100644 lib/improver/spotdata/framework.py
 delete mode 100644 lib/improver/spotdata/read_input.py
 delete mode 100644 lib/improver/spotdata/site_data.py
 delete mode 100644 lib/improver/spotdata/tests/test_extract_data.py
 delete mode 100644 lib/improver/spotdata/times.py
 delete mode 100644 lib/improver/spotdata/write_output.py
diff --git a/lib/improver/spotdata/configurations.py b/lib/improver/spotdata/configurations.py
deleted file mode 100644
index 307ebed88f..0000000000
--- a/lib/improver/spotdata/configurations.py
+++ /dev/null
@@ -1,186 +0,0 @@
-# -*- coding: utf-8 -*-
-# -----------------------------------------------------------------------------
-# (C) British Crown Copyright 2017 Met Office.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
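# Back-reference to the find_coord_order change in PATCH 0039 above: the
# trailing comma unpacks the one-element tuple returned by coord_dims, and
# fails loudly if the coordinate unexpectedly spans several dimensions.
# Standalone illustration with a plain tuple standing in for that return:
position, = (2,)        # position == 2
try:
    position, = (0, 1)  # ValueError: too many values to unpack
except ValueError:
    pass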
-# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -"""Test data extraction configuration""" - - -def all_diagnostics(diagnostic_data_path): - ''' - Defines how all available diagnostics should be processed. A custom name - used to key the returned dictionary allows for multiple variations on the - derivation of any one diagnostic for different products. - - e.g. - temperature - might use intelligent grid point neighbour finding to help - near the coasts, and use a model_level_temperature_lapse_rate - to adjust the extracted data for unresolved topography. - - temperature_simple - might use simple nearest neighbour finding and - use_nearest data extraction to simply take the value - from that neighbouring point. - - Args: - ----- - diagnostic_data_path : A string containing the file path of the diagnostic - data to be read in. 
- ''' - diagnostic_recipes = { - 'temperature': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'temperature_at_screen_level' + '*'), - 'diagnostic_name': 'air_temperature', - 'neighbour_finding': 'default', - # 'interpolation_method': 'model_level_temperature_lapse_rate', - 'interpolation_method': 'use_nearest', - 'extrema': True - }, - 'temperature_orog': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'temperature_at_screen_level' + '*'), - 'diagnostic_name': 'air_temperature', - 'neighbour_finding': 'default', - 'interpolation_method': 'orography_derived_temperature_lapse_rate', - 'extrema': True - }, - 'wind_speed': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'horizontal_wind_speed_and_direction_at_10m' + '*'), - 'diagnostic_name': 'wind_speed', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - }, - 'wind_direction': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'horizontal_wind_speed_and_direction_at_10m' + '*'), - 'diagnostic_name': 'wind_from_direction', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - }, - 'visibility': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'visibility_at_screen_level' + '*'), - 'diagnostic_name': 'visibility_in_air', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': True - }, - 'relative_humidity': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'relative_humidity_at_screen_level' + '*'), - 'diagnostic_name': 'relative_humidity', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - }, - 'surface_pressure': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'surface_pressure' + '*'), - 'diagnostic_name': 'surface_air_pressure', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - }, - 'low_cloud_amount': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'low_cloud_amount' + '*'), - 'diagnostic_name': 'low_type_cloud_area_fraction', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - }, - 'medium_cloud_amount': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'medium_cloud_amount' + '*'), - 'diagnostic_name': 'medium_type_cloud_area_fraction', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - }, - 'high_cloud_amount': { - 'filepath': (diagnostic_data_path + '/*/*' + - 'high_cloud_amount' + '*'), - 'diagnostic_name': 'high_type_cloud_area_fraction', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - }, - 'total_cloud_amount': { - 'filepath': diagnostic_data_path + '/*/*' + 'total_cloud' + '*', - 'diagnostic_name': 'cloud_area_fraction', - 'neighbour_finding': 'default', - 'interpolation_method': 'use_nearest', - 'extrema': False - } - } - return diagnostic_recipes - - -def define_diagnostics(configuration, data_path): - ''' - Define the configurations with which spotdata may be run. These - configurations specify which diagnostic definitions to include - when processing for a given product. - - The routine also defines a default method of grid point neighbour - finding diagnostics configurations that simply refer to 'default'. - - Args: - ----- - configuration : A string used as a key in the configuration dictionary - to select the configuration for use. - - Returns: - -------- - Dictionary containing the diagnostics to be processed and the definition - of how to process them. 
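# For the record, the module being deleted behaved as follows; sketch only,
# against the pre-deletion package and a hypothetical placeholder path:
from improver.spotdata.configurations import define_diagnostics

neighbour_default, diagnostics = define_diagnostics(
    'short_test', '/path/to/diagnostic/data')
print(neighbour_default)    # 'fast_nearest_neighbour'
print(sorted(diagnostics))  # ['temperature', 'wind_speed']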
- - ''' - diagnostics = all_diagnostics(data_path) - - configuration_dict = { - 'pws_default': - ['temperature', 'wind_speed', 'wind_direction', 'visibility', - 'relative_humidity', 'surface_pressure', 'low_cloud_amount', - 'medium_cloud_amount', 'high_cloud_amount', 'total_cloud_amount'], - - 'short_test': - ['temperature', 'wind_speed'] - } - - neighbour_finding_default = { - 'pws_default': 'fast_nearest_neighbour', - 'short_test': 'fast_nearest_neighbour' - } - - return (neighbour_finding_default[configuration], - dict((key, diagnostics[key]) - for key in configuration_dict[configuration])) diff --git a/lib/improver/spotdata/extract_data.py b/lib/improver/spotdata/extract_data.py deleted file mode 100644 index 0eb249b7a2..0000000000 --- a/lib/improver/spotdata/extract_data.py +++ /dev/null @@ -1,339 +0,0 @@ -# -*- coding: utf-8 -*- -# ----------------------------------------------------------------------------- -# (C) British Crown Copyright 2017 Met Office. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -"""Gridded data extraction for the Improver site specific process chain.""" - -import numpy as np -from iris import FUTURE -from iris.coords import AuxCoord, DimCoord -from iris import Constraint -from iris.cube import Cube, CubeList -from numpy.linalg import lstsq -from improver.spotdata.common_functions import (nearest_n_neighbours, - datetime_constraint, - node_edge_test) -from improver.spotdata.ancillaries import data_from_ancillary -from improver.spotdata.read_input import get_additional_diagnostics -from improver.constants import (R_DRY_AIR, - CP_DRY_AIR) - -FUTURE.cell_datetime_objects = True - - -class ExtractData(object): - ''' - A series of methods for extracting data from grid points to derive - diagnostic values at off grid positions. - - ''' - - def __init__(self, method='use_nearest'): - """ - The class is called with the desired method to be used in extracting/ - interpolating data to the site of interest from gridded data. 
-
-        """
-
-        self.method = method
-
-    def process(self, cubes, sites, neighbours, forecast_times,
-                additional_data, **kwargs):
-        """
-        Call the correct function to enact the method of data extraction
-        specified. This function also handles multiple timesteps,
-        consolidating the resulting data cubes into an iris.cube.CubeList.
-
-        Args:
-        -----
-        cubes:   iris.cube.CubeList of diagnostic data spanning available
-                 times.
-        sites:   Dictionary of site data, including lat/lon and altitude
-                 information.
-        forecast_times:
-                 A list of datetime objects representing forecast times for
-                 which data is required.
-        neighbours:
-                 A list of neighbouring grid points that corresponds to sites
-                 in the SortedDictionary of sites.
-        additional_data:
-                 A dictionary containing any supplementary time varying
-                 diagnostics that are needed for the selected extraction
-                 method.
-        ancillary_data:
-                 A dictionary containing additional model data that
-                 is needed, e.g. {'orography': <orography cube>}.
-
-        Returns:
-        --------
-        resulting_cubes:
-                 An iris.cube.CubeList of irregular (i.e. non-gridded) cubes
-                 of data that correspond to the sites of interest at the
-                 times of interest.
-        """
-
-        if forecast_times is None:
-            raise Exception("No forecast times provided.")
-
-        resulting_cubes = CubeList()
-        function = getattr(self, self.method)
-        for a_time in forecast_times:
-            time_extract = datetime_constraint(a_time)
-            cube_in, = cubes.extract(time_extract)
-
-            if additional_data is not None:
-                for key in additional_data.keys():
-                    ad_time, = additional_data[key].extract(time_extract)
-                    kwargs.update({key: ad_time})
-
-            resulting_cubes.append(
-                function(cube_in, sites, neighbours, **kwargs)
-                )
-
-        return resulting_cubes
-
-    @staticmethod
-    def _build_coordinates(latitudes, longitudes, site_ids, gmtoffsets):
-        '''
-        Construct coordinates for the irregular iris.Cube containing site
-        data. A single dimensional coordinate is created using the running
-        order, whilst the non-monotonically increasing coordinates
-        (e.g. bestdata_id) are stored as auxiliary coordinates (AuxCoords).
-
-        Args:
-        -----
-        latitudes  : A list of latitudes ordered to correspond with the sites
-                     OrderedDict.
-        longitudes : A list of longitudes ordered to correspond with the
-                     sites OrderedDict.
-        site_ids   : A list of bestdata site_ids ordered to correspond with
-                     the sites OrderedDict.
-        gmtoffsets : A list of GMT offsets in hours ordered to correspond
-                     with the sites OrderedDict.
-
-        Returns:
-        --------
-        Creates iris.DimCoord and iris.AuxCoord objects from the provided data
-        for use in constructing new cubes.
-        '''
-        indices = DimCoord(np.arange(len(latitudes)), long_name='index',
-                           units='1')
-        bd_ids = AuxCoord(site_ids, long_name='bestdata_id', units='1')
-        latitude = AuxCoord(latitudes, standard_name='latitude',
-                            units='degrees')
-        longitude = AuxCoord(longitudes, standard_name='longitude',
-                             units='degrees')
-        gmtoffset = AuxCoord(gmtoffsets, long_name='gmtoffset',
-                             units='hours')
-        return indices, bd_ids, latitude, longitude, gmtoffset
-
-    def make_cube(self, cube, data, sites):
-        '''
-        Construct and return a cube containing the data extracted from the
-        grids by the desired method for the sites provided.
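# The spotdata plugins dispatch on a method-name string via getattr, as in
# ExtractData.process above. A self-contained illustration of the pattern:
class Plugin(object):
    """Toy plugin mirroring the method-string dispatch used here."""

    def __init__(self, method):
        self.method = method

    def process(self, value):
        function = getattr(self, self.method)
        return function(value)

    def double(self, value):
        return 2 * value

print(Plugin('double').process(21))  # 42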
- - ''' - latitudes = [site['latitude'] for site in sites.itervalues()] - longitudes = [site['longitude'] for site in sites.itervalues()] - gmtoffsets = [site['gmtoffset'] for site in sites.itervalues()] - site_ids = sites.keys() - - indices, bd_ids, latitude, longitude, gmtoffset = ( - self._build_coordinates( - latitudes, longitudes, site_ids, gmtoffsets)) - - # Add leading dimension for time. - data.resize(1, len(data)) - result_cube = Cube(data, - long_name=cube.name(), - dim_coords_and_dims=[(cube.coord('time'), 0), - (indices, 1)], - aux_coords_and_dims=[(latitude, 1), - (longitude, 1), - (gmtoffset, 1), - (bd_ids, 1)], - units=cube.units) - - # Enables use of long_name above for any name, and then moves it - # to a standard name if possible. - result_cube.rename(cube.name()) - return result_cube - - def use_nearest(self, cube, sites, neighbours, ancillary_data=None): - ''' - Simplest case, in which the diagnostic data value at the nearest grid - point, as determined by the chosen PointSelection method, is used for - the site. - - ''' - if (not cube.coord_dims(cube.coord(axis='y').name())[0] == 0 or - not cube.coord_dims(cube.coord(axis='x').name())[0] == 1): - raise Exception("Cube dimensions not as expected.") - - data = cube.data[neighbours['i'], neighbours['j']] - return self.make_cube(cube, data, sites) - - def orography_derived_temperature_lapse_rate(self, cube, sites, neighbours, - ancillary_data=None): - ''' - Crude lapse rate method that uses temperature variation and height - variation across local nodes to derive lapse rate. This is highly - prone to noise given the small number of points involved and the - variable degree to which elevation changes across the small number - of points. - - ''' - def local_lapse_rate(cube, orography, node_list): - ''' - Least-squares fit to local temperature and altitude data for grid - points defined by node_list to calculate a local lapse rate. - - ''' - y_data = cube.data[node_list] - x_data = orography[node_list] - matrix = np.vstack([x_data, np.ones(len(x_data))]).T - gradient, intercept = lstsq(matrix, y_data)[0] - return [gradient, intercept] - - orography = data_from_ancillary(ancillary_data, 'orography') - data = np.empty(shape=(len(sites))) - - for i_site, site in enumerate(sites.itervalues()): - altitude = site['altitude'] - - i, j = neighbours['i'][i_site], neighbours['j'][i_site] - edgecase = neighbours['edge'] - node_list = nearest_n_neighbours(i, j, 9) - if edgecase: - node_list = node_edge_test(node_list, cube) - - llr = local_lapse_rate(cube, orography, node_list) - data[i_site] = llr[0]*altitude + llr[1] - - return self.make_cube(cube, data, sites) - - def model_level_temperature_lapse_rate(self, cube, sites, neighbours, - ancillary_data=None, - pressure_on_height_levels=None, - surface_pressure=None, - temperature_on_height_levels=None): - ''' - Lapse rate method based on potential temperature. Follows the work of - S.B. Vosper 2005 (Near-surface temperature variations over complex - terrain). 
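# Worked sketch of the potential-temperature conversion this method applies
# (illustrative numbers; the gas constant value is assumed here):
R_DRY_AIR = 287.0    # J K-1 kg-1, assumed value for illustration
CP_DRY_AIR = 1005.0  # J K-1 kg-1, as in improver.constants
Kappa = R_DRY_AIR / CP_DRY_AIR  # ~0.286

# theta = T * (1000 hPa / p) ** Kappa; at the 1000 hPa reference, theta == T.
print(293.15 * (1000. / 1000.) ** Kappa)  # 293.15 K
print(263.15 * (1000. / 900.) ** Kappa)   # ~271.2 K at 900 hPa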
- - ''' - if (pressure_on_height_levels is None or - surface_pressure is None or - temperature_on_height_levels is None): - raise Exception( - "Required additional data is unset: \n" - "pressure_on_height_levels = {}\n" - "temperature_on_height_levels = {}\n" - "surface_pressure = {}\n".format( - pressure_on_height_levels, - temperature_on_height_levels, - surface_pressure) - ) - - pressure_on_height_levels.convert_units('hPa') - surface_pressure.convert_units('hPa') - - h50con = Constraint(height=50) - t50 = temperature_on_height_levels.extract(h50con) - p50 = pressure_on_height_levels.extract(h50con) - Kappa = R_DRY_AIR/CP_DRY_AIR - - data = np.empty(shape=(len(sites))) - for i_site in range(len(sites)): - - i, j, dz = (neighbours['i'][i_site], neighbours['j'][i_site], - neighbours['dz'][i_site]) - - # Use neighbour grid point value if vertical displacement=0. - if dz == 0.: - data[i_site] = cube.data[i, j] - continue - - t_upper = t50.data[i, j] - p_upper = p50.data[i, j] - t_surface = cube.data[i, j] - p_surface = surface_pressure.data[i, j] - - p_grad = (p_upper - p_surface)/50. - p_site = p_surface + p_grad*dz - - theta_upper = t_upper*(1000./p_upper)**Kappa - theta_surface = t_surface*(1000./p_surface)**Kappa - dthetadz = (theta_upper - theta_surface)/50. - - if abs(dz) < 1.: - t1p5 = t_surface - else: - dz = min(abs(dz), 70.)*np.sign(dz) - if dthetadz > 0: - t1p5 = theta_surface*(p_site/1000.)**Kappa - else: - t1p5 = (theta_surface + dz*dthetadz)*(p_site/1000.)**Kappa - - data[i_site] = t1p5 - - return self.make_cube(cube, data, sites) - - -def get_method_prerequisites(method, diagnostic_data_path): - ''' - Determine which additional diagnostics are required for a given - method of data extraction. - - Args: - ----- - method : The method of data extraction that is being used. - - Returns: - -------- - ad : A dictionary keyed with the diagnostic names and containing the - additional cubes that are required. - - ''' - if method == 'model_level_temperature_lapse_rate': - additional_diagnostics = [ - 'temperature_on_height_levels', - 'pressure_on_height_levels', - 'surface_pressure'] - else: - return None - - ad = {} - for item in additional_diagnostics: - ad.update({item: - get_additional_diagnostics( - item, diagnostic_data_path) - }) - return ad diff --git a/lib/improver/spotdata/extrema.py b/lib/improver/spotdata/extrema.py deleted file mode 100644 index b7e974dab8..0000000000 --- a/lib/improver/spotdata/extrema.py +++ /dev/null @@ -1,112 +0,0 @@ -# -*- coding: utf-8 -*- -# ----------------------------------------------------------------------------- -# (C) British Crown Copyright 2017 Met Office. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -"""Gridded data extraction for the Improver site specific process chain.""" - -import numpy as np -from iris.analysis import MAX as IMAX -from iris.analysis import MIN as IMIN -from iris import FUTURE -from improver.spotdata.write_output import WriteOutput - -FUTURE.cell_datetime_objects = True - - -class ExtractExtrema(object): - '''Extract diagnostic maxima and minima in a given time period.''' - - def __init__(self, method): - """ - The class is called with the desired method, in this case the period - over which to calculate the extrema values. - - This all needs to be updated to work properly with local times if that - is desirable, and to present additional options. And to actually - function as advertised. - - INCOMPLETE. - - """ - self.method = method - - def process(self, cube): - '''Call the required method''' - function = getattr(self, self.method) - function(cube) - - @staticmethod - def In24hr(cube): - ''' - Calculate extrema values for diagnostic in cube over 24 hour period. - - Args: - ----- - cube : Cube of diagnostic data. - - Returns: - -------- - Nil. Writes out cube of extrema data. - - ''' - cube.coord('time').points = cube.coord('time').points.astype(np.int64) - - cube_max = cube.collapsed('time', IMAX) - cube_min = cube.collapsed('time', IMIN) - - cube_max.long_name = cube_max.name() + '_max' - cube_min.long_name = cube_min.name() + '_min' - cube_max.standard_name = None - cube_min.standard_name = None - - cube.coord('time').points = cube.coord('time').points.astype(np.int32) - - WriteOutput('as_netcdf').process(cube_max) - WriteOutput('as_netcdf').process(cube_min) - -# def local_dates_in_cube(cube): -# ''' -# Incomplete work on using local date time information. -# -# OUT OF DATE AND INCOMPLETE. -# ''' -# from datetime import timedelta as timedelta -# -# dates_in_cube = unit.num2date( -# b.coord('time').points, b.coord('time').units.name, -# b.coord('time').units.calendar) -# -# start_time = dates_in_cube[0] - timedelta(hours=12) -# if start_time.hour < 18: -# start_day = start_time.date() -# else: -# start_day = dates_in_cube[0].date() -# -# end_time = (dates_in_cube[-1] + timedelta(hours=12)).date() diff --git a/lib/improver/spotdata/framework.py b/lib/improver/spotdata/framework.py deleted file mode 100644 index 8a3731b8f2..0000000000 --- a/lib/improver/spotdata/framework.py +++ /dev/null @@ -1,233 +0,0 @@ -# -*- coding: utf-8 -*- -# ----------------------------------------------------------------------------- -# (C) British Crown Copyright 2017 Met Office. -# All rights reserved. 
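# Back-reference to ExtractExtrema.In24hr above: a minimal, standalone sketch
# of collapsing a cube over time with iris MAX/MIN (assumes iris available):
import numpy as np
from iris.analysis import MAX as IMAX, MIN as IMIN
from iris.coords import DimCoord
from iris.cube import Cube

time = DimCoord(np.arange(3, dtype=np.int64), standard_name='time',
                units='hours since 1970-01-01 00:00:00')
cube = Cube(np.array([280., 285., 282.]), long_name='air_temperature',
            dim_coords_and_dims=[(time, 0)], units='K')
print(cube.collapsed('time', IMAX).data)  # 285.0
print(cube.collapsed('time', IMIN).data)  # 280.0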
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -"""The framework for site specific post-processing.""" - -import argparse -import multiprocessing as mp - -from improver.spotdata.read_input import Load -from improver.spotdata.neighbour_finding import PointSelection -from improver.spotdata.extract_data import (ExtractData, - get_method_prerequisites) -from improver.spotdata.write_output import WriteOutput -from improver.spotdata.ancillaries import get_ancillary_data -from improver.spotdata.extrema import ExtractExtrema -from improver.spotdata.site_data import ImportSiteData -from improver.spotdata.times import get_forecast_times -from improver.spotdata.configurations import define_diagnostics - - -def run_framework(config_name, data_path, ancillary_path, site_path=None, - latitudes=None, longitudes=None, - altitudes=None, site_ids=None, forecast_date=None, - forecast_time=None, forecast_length=None, - use_multiprocessing=False): - ''' - A framework that calls the components of the spotdata code. This includes - building site data into a suitable format, finding grid neighbours to - those sites with the chosen method, and then extracting data with the - chosen method. The final results are written out to new irregularly - gridded iris.cube.Cubes. - - Args: - ----- - config_name : A string giving the chosen configuration with which - to run the spotdata system. e.g. pws_default which - will produce the required diagnostics for this - product. - data_path : String giving path to diagnostic data files. - ancillary_path : String giving path to ancillary data files. - site_path : String giving path to site data file if in use. If - no lats/lons are specified at the command line, this - file path is needed. - latitudes : A list of latitudes for running on the fly for a - custom set of sites. The order should correspond - to the subsequent latitudes and altitudes variables - to construct each site. - longitudes : A list of longitudes for running on the fly for a - custom set of sites. - altitudes : A list of altitudes for running on the fly for a - custom set of sites. 
- site_ids : A list of site_ids to associate with the above on - the fly constructed sites. This must be ordered the - same as the latitudes/longitudes/altitudes lists. - forecast_date : A string of format YYYYMMDD defining the start date - for which forecasts are required. - forecast_time : An integer giving the hour on the forecast_date at - which to start the forecast output; 24hr clock such - that 17 = 17Z for example. - forecast_length : An integer giving the desired length of the forecast - output in hours (e.g. 48 for a two day forecast - period). - use_multiprocessing : A boolean determining whether to use multiprocessing - in the data extraction component of the code. - - Returns: - -------- - Nil. - - ''' - - # Establish forecast time list based upon input specifications, or if not - # provided, use defaults. - forecast_times = get_forecast_times(forecast_date=forecast_date, - forecast_time=forecast_time, - forecast_length=forecast_length) - - # If using locations set at command line, set optional information such - # as site altitude and site_id. - if latitudes and longitudes: - optionals = {} - if altitudes is not None: - optionals.update({'altitudes': altitudes}) - if site_ids is not None: - optionals.update({'site_ids': site_ids}) - - sites = ImportSiteData('runtime_list').process(latitudes, longitudes, - **optionals) - - # Clumsy implementation of grabbing the BD pickle file if no sites are - # specified. - if latitudes is None or longitudes is None: - if site_path is None: - raise Exception('Site path required to get site data if no sites ' - 'are specified at runtime.') - else: - site_path = (site_path + '/bestdata2_locsDB.pkl') - sites = ImportSiteData('pickle_file').process(site_path) - - # Use the selected config to estabilish which diagnostics are required. - # Also gets the default method of selecting grid point neighbours for the - # given configuration. - neighbour_finding_default, diagnostics = define_diagnostics(config_name, - data_path) - - # Load ancillary data files; fields that don't vary in time. - ancillary_data = get_ancillary_data(diagnostics, ancillary_path) - - # Construct a set of neighbour_finding methods to be used in this run. - neighbour_schemes = list( - set([diagnostics[x]['neighbour_finding'] - for x in diagnostics.keys()])) - neighbour_schemes.remove('default') - - # Set up site-grid point neighbour list using default method. Other IGPS - # methods will use this as a starting point so it must always be done. - neighbours = {} - neighbours.update( - {'default': - PointSelection(neighbour_finding_default).process( - ancillary_data['orography'], sites, - ancillary_data=ancillary_data) - }) - - # Set up site-grid point neighbour lists for all IGPS methods being used. - for scheme in neighbour_schemes: - neighbours.update( - {scheme: - PointSelection(scheme).process( - ancillary_data['orography'], sites, - ancillary_data=ancillary_data, - default_neighbours=neighbours['default']) - }) - - if use_multiprocessing: - # Process diagnostics on separate threads is multiprocessing is - # selected. Determine number of diagnostics to establish - # multiprocessing pool size. - n_diagnostic_threads = min(len(diagnostics.keys()), mp.cpu_count()) - - # Establish multiprocessing pool - each diagnostic processed on its - # own thread. 
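# (Annotation, not part of the patch.) The pool pattern used below, reduced
# to a standalone sketch: one task per diagnostic, with the worker count
# capped at the machine's core count.
import multiprocessing as mp

def work(name):
    return name.upper()

if __name__ == '__main__':
    pool = mp.Pool(processes=min(3, mp.cpu_count()))
    results = [pool.apply_async(work, args=(name,))
               for name in ('temperature', 'wind_speed', 'visibility')]
    pool.close()
    pool.join()
    print([result.get() for result in results])  # ['TEMPERATURE', ...]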
- diagnostic_pool = mp.Pool(processes=n_diagnostic_threads) - - for key in diagnostics.keys(): - diagnostic = diagnostics[key] - diagnostic_pool.apply_async( - process_diagnostic, - args=( - diagnostic, neighbours, sites, forecast_times, - ancillary_data)) - - diagnostic_pool.close() - diagnostic_pool.join() - - else: - # Process diagnostics serially on one thread. - for key in diagnostics.keys(): - diagnostic = diagnostics[key] - process_diagnostic(diagnostic, neighbours, sites, forecast_times, - data_path, ancillary_data) - - -def process_diagnostic(diagnostic, neighbours, sites, forecast_times, - data_path, ancillary_data): - ''' - Extract data and write output for a given diagnostic. - - Args: - ----- - diagnostic : String naming the diagnostic to be processed. - neighbours : Dictionary of gridpoint neighbours to each site produced - by the different available neighbour finding methods that - have been used in the chosen configuration. - sites : Dictionary of spotdata sites to process. - forecast_times : Python datetime objects specifying the times for which - forecast diagnostics are required. - ancillary_data : Dictionary of time invariant fields that may be used by - the data extraction methods (e.g. orography). - - Returns: - -------- - Nil. - - ''' - # print 'neighbour finding with ', diagnostic['neighbour_finding'] - # print 'using interpolation method ', diagnostic['interpolation_method'] - - data = Load('multi_file').process(diagnostic['filepath'], - diagnostic['diagnostic_name']) - neighbour_list = neighbours[diagnostic['neighbour_finding']] - - additional_data = get_method_prerequisites( - diagnostic['interpolation_method'], data_path) - - cubes_out = ExtractData( - diagnostic['interpolation_method'] - ).process(data, sites, neighbour_list, forecast_times, - additional_data, ancillary_data=ancillary_data) - - cube_out, = cubes_out.concatenate() - - if diagnostic['extrema']: - ExtractExtrema('In24hr').process(cube_out) - - WriteOutput('as_netcdf').process(cube_out) diff --git a/lib/improver/spotdata/read_input.py b/lib/improver/spotdata/read_input.py deleted file mode 100644 index 80150eb6e1..0000000000 --- a/lib/improver/spotdata/read_input.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# ----------------------------------------------------------------------------- -# (C) British Crown Copyright 2017 Met Office. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -""" -Plugins written for the Improver site specific process chain. -For reading data files from UM output and site specification input. - -""" - -from iris import load_cube, load -from iris import FUTURE -from iris.cube import CubeList - -FUTURE.netcdf_promote = True - - -class Load(object): - - """Plugin for loading data.""" - - def __init__(self, method): - """ - Simple function that currently takes a filename and loads a netCDF - file. - - """ - self.method = method - - def process(self, filepath, diagnostic): - """ - Simple wrapper for using iris load on a supplied netCDF file. - - Returns - ------- - Cube - A cube containing the data from the netCDF file. - - """ - function = getattr(self, self.method) - return function(filepath, diagnostic) - - @staticmethod - def single_file(filepath, diagnostic): - """ Load and return a single iris.cube.Cube """ - return load_cube(filepath, diagnostic) - - @staticmethod - def multi_file(filepath, diagnostic): - """ Load multiple cubes and return a iris.cube.CubeList """ - return load(filepath, diagnostic) - - -def get_additional_diagnostics(diagnostic_name, diagnostic_data_path, - time_extract=None): - """ - Load additional diagnostics needed for particular spot data processes. - - Args - ---- - diagnostic_name : The name of the diagnostic to be loaded. Used to find - the relevant file. - time_extract : An iris constraint to extract and return only data from - the desired time. - - Returns - ------- - cube : An iris.cube.CubeList containing the desired diagnostic - data, with a single entry is time_extract is provided. - - """ - with FUTURE.context(cell_datetime_objects=True): - cubes = Load('multi_file').process( - diagnostic_data_path + '/*/*' + diagnostic_name + '*', - None) - if time_extract is not None: - cube = cubes.extract(time_extract) - cubes = CubeList() - cubes.append(cube) - return cubes diff --git a/lib/improver/spotdata/site_data.py b/lib/improver/spotdata/site_data.py deleted file mode 100644 index bb37938cc3..0000000000 --- a/lib/improver/spotdata/site_data.py +++ /dev/null @@ -1,174 +0,0 @@ -# -*- coding: utf-8 -*- -# ----------------------------------------------------------------------------- -# (C) British Crown Copyright 2017 Met Office. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. 
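# Back-reference to the Load plugin above: 'single_file' wraps iris.load_cube
# and 'multi_file' wraps iris.load. Sketch only, against the pre-deletion
# package and hypothetical placeholder paths:
from improver.spotdata.read_input import Load

cube = Load('single_file').process('/path/to/file.nc', 'air_temperature')
cubes = Load('multi_file').process('/path/to/*/*air_temperature*', None)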
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -""" -Plugins written for the Improver site specific process chain. - -""" - -import numpy as np -import cPickle -from collections import OrderedDict - - -class ImportSiteData(object): - ''' - Create a dictionary of site information from a variety of sources. - Currently supported are the import of a pickle file with site - information - called with 'pickle_file' - Or lists of properties for sites - called with 'runtime_list'. - - ''' - - def __init__(self, source): - ''' - Class is called with the desired source of site data. The source - may be a pickle file or a runtime_list that is defined on the command - line or in the suite. - - Args: - ----- - source : string setting the source of site data. - - ''' - self.source = source - self.latitudes = None - self.longitudes = None - self.altitudes = None - self.site_ids = None - self.gmtoffsets = None - - def process(self, *args, **kwargs): - '''Call the required method''' - function = getattr(self, self.source) - return function(*args, **kwargs) - - def pickle_file(self, file_path): - ''' - Use a pickle file produced by the current SSPS system. - - ''' - site_data = self.read_pickle_file(file_path) - - self.latitudes = np.array([site.latitude for site in site_data]) - self.longitudes = np.array([site.longitude for site in site_data]) - self.altitudes = np.array([site.altitude for site in site_data]) - self.site_ids = np.array([site.bestdata_id for site in site_data]) - self.gmtoffsets = np.array([site.gmtoffset for site in site_data]) - - return self.construct_site_dictionary() - - @staticmethod - def read_pickle_file(file_path): - ''' - Uses existing bestdata site routines to decode pickle file created - by bestdata2. - - Args: - ----- - file_path : Path to target pickle file. - - Returns: - -------- - bd_site_data : bestdata site class containing site information. - - ''' - try: - with open(file_path, 'rb') as bd_pickle_file: - _ = cPickle.load(bd_pickle_file) - [_, _, bd_site_data, _, _] = (cPickle.load(bd_pickle_file)) - except: - raise Exception("Unable to read pickle file.") - - return bd_site_data - - def runtime_list(self, latitudes, longitudes, - altitudes=None, site_ids=None): - ''' - Use data provided on the command line/controlling suite at runtime. 
- - ''' - if site_ids is not None: - self.site_ids = np.array(site_ids) - else: - self.site_ids = np.arange(len(latitudes)) - if altitudes is not None: - self.altitudes = np.array(altitudes) - else: - self.altitudes = np.zeros(len(latitudes)) - - self.latitudes = np.array(latitudes) - self.longitudes = np.array(longitudes) - self.gmtoffsets = set_gmt_offset(self.longitudes) - - return self.construct_site_dictionary() - - def construct_site_dictionary(self): - ''' - Constructs a dictionary of site data regardles of source to give the - spotdata routines a consistent source of site data. - - Returns: - -------- - sites : Dictionary of site data. - - ''' - sites = OrderedDict() - for i_site, site_id in enumerate(self.site_ids): - if self.gmtoffsets[i_site] is None: - self.gmtoffsets[i_site] = 0 - sites.update( - {site_id: { - 'latitude': self.latitudes[i_site], - 'longitude': self.longitudes[i_site], - 'altitude': self.altitudes[i_site], - 'gmtoffset': self.gmtoffsets[i_site] - } - }) - return sites - - -def set_gmt_offset(longitudes): - ''' - Simplistic timezone setting for unset sites that uses 15 degree bins - centred on 0 degrees longitude. Used for on the fly site generation - when no more rigorous source of timeszone information is provided. - - Args: - ----- - longitudes : list of longitudes. - - Returns: - -------- - gmtoffsets : list of gmtoffsets calculated using longitude. - - ''' - return ((longitudes + (7.5*np.sign(longitudes)))/15).astype(int) diff --git a/lib/improver/spotdata/tests/test_extract_data.py b/lib/improver/spotdata/tests/test_extract_data.py deleted file mode 100644 index 61aa993d16..0000000000 --- a/lib/improver/spotdata/tests/test_extract_data.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# ----------------------------------------------------------------------------- -# (C) British Crown Copyright 2017 Met Office. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. 
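# Back-reference to set_gmt_offset above: offsets come from 15-degree bins
# centred on 0 degrees longitude. Worked example, runnable standalone:
import numpy as np

longitudes = np.array([-8.0, 0.0, 9.47, 170.0])
offsets = ((longitudes + (7.5 * np.sign(longitudes))) / 15).astype(int)
print(offsets)  # [-1  0  1 11], e.g. 9.47E -> int((9.47 + 7.5) / 15) = 1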
-"""Unit tests for the spotdata.ExtractData plugin.""" - - -import unittest -import cf_units - -from datetime import datetime as dt -from iris.coords import (DimCoord, - AuxCoord) -from iris import coord_systems -from iris.coord_systems import GeogCS -from iris.cube import (Cube, - CubeList) -from iris.tests import IrisTest -import cartopy.crs as ccrs -from collections import OrderedDict -from iris import FUTURE -import numpy as np - -from improver.spotdata.extract_data import ExtractData - -FUTURE.cell_datetime_objects = True - - -class TestExtractData(IrisTest): - - """Test the extract data plugin.""" - - def setUp(self): - """ - Create a cube containing a regular lat-lon grid. - - Data is formatted to increase linearly in x/y dimensions, - e.g. - 0 1 2 3 - 1 2 3 4 - 2 3 4 5 - 3 4 5 6 - - """ - data = np.arange(0, 20, 1) - for i in range(1, 20): - data = np.append(data, np.arange(i, 20+i)) - - data.resize(1, 20, 20) - latitudes = np.linspace(-90, 90, 20) - longitudes = np.linspace(-180, 180, 20) - latitude = DimCoord(latitudes, standard_name='latitude', - units='degrees', coord_system=GeogCS(6371229.0)) - longitude = DimCoord(longitudes, standard_name='longitude', - units='degrees', coord_system=GeogCS(6371229.0)) - - # Use time of 2017-02-17 06:00:00 - time = DimCoord( - [1487311200], standard_name='time', - units=cf_units.Unit('seconds since 1970-01-01 00:00:00', - calendar='gregorian')) - - time_dt = [dt(2017, 02, 17, 06, 00)] - - cube = Cube(data, - long_name="test_data", - dim_coords_and_dims=[(time, 0), - (latitude, 1), - (longitude, 2)], - units="1") - - cubes = CubeList() - cubes.append(cube) - - orography = Cube(np.ones((20, 20)), - long_name="surface_altitude", - dim_coords_and_dims=[(latitude, 0), - (longitude, 1)], - units="m") - - # Western half of grid at altitude 0, eastern half at 10. - # Note that the pressure_on_height_levels data is left unchanged, - # so it is as if there is a sharp front running up the grid with - # differing pressures on either side at equivalent heights above - # the surface (e.g. east 1000hPa at 0m AMSL, west 1000hPa at 10m AMSL). - # So there is higher pressure in the west. - orography.data[0:10] = 0 - orography.data[10:] = 10 - ancillary_data = {} - ancillary_data.update({'orography': orography}) - - # Create additional vertical data used to calculate temperature lapse - # rates from model levels. - - t_level0 = np.ones((1, 20, 20))*20. - t_level1 = np.ones((1, 20, 20))*-20. - t_level2 = np.ones((1, 20, 20))*-60. - t_data = np.vstack((t_level0, t_level1, t_level2)) - t_data.resize((1, 3, 20, 20)) - - p_level0 = np.ones((1, 20, 20))*1000. - p_level1 = np.ones((1, 20, 20))*900. - p_level2 = np.ones((1, 20, 20))*800. 
- p_data = np.vstack((p_level0, p_level1, p_level2)) - p_data.resize((1, 3, 20, 20)) - - height = DimCoord([0., 50., 100.], standard_name='height', units='m') - - temperature_on_height_levels = CubeList() - temperature_on_height_levels.append( - Cube( - t_data, - long_name="temperature_on_height_levels", - dim_coords_and_dims=[(time, 0), (height, 1), - (latitude, 2), (longitude, 3)], - units="degree_Celsius")) - - pressure_on_height_levels = CubeList() - pressure_on_height_levels.append( - Cube( - p_data, - long_name="pressure_on_height_levels", - dim_coords_and_dims=[(time, 0), (height, 1), - (latitude, 2), (longitude, 3)], - units="hPa")) - - surface_pressure = CubeList() - surface_pressure.append( - Cube( - p_data[0, 0].reshape(1, 20, 20), - long_name="surface_pressure", - dim_coords_and_dims=[(time, 0), (latitude, 1), (longitude, 2)], - units="hPa")) - - ad = {'temperature_on_height_levels': temperature_on_height_levels, - 'pressure_on_height_levels': pressure_on_height_levels, - 'surface_pressure': surface_pressure} - - sites = OrderedDict() - sites.update( - {'100': - {'latitude': 4.74, - 'longitude': 9.47, - 'altitude': 10, - 'gmtoffset': 0 - } - }) - - neighbour_list = np.empty(1, dtype=[('i', 'i8'), - ('j', 'i8'), - ('dz', 'f8'), - ('edge', 'bool_')]) - - neighbour_list[0] = 10, 10, 0, False - - self.cubes = cubes - self.ancillary_data = ancillary_data - self.ad = ad - self.sites = sites - self.neighbour_list = neighbour_list - self.time_dt = time_dt - - def return_type(self, method, additional_data, **kwargs): - """Test that the plugin returns an iris.cube.CubeList.""" - plugin = ExtractData(method) - result = plugin.process(self.cubes, self.sites, self.neighbour_list, - self.time_dt, additional_data, **kwargs) - - self.assertIsInstance(result, CubeList) - - def extracted_value(self, method, additional_data, expected, **kwargs): - """Test that the plugin returns the correct value.""" - plugin = ExtractData(method) - result = plugin.process(self.cubes, self.sites, self.neighbour_list, - self.time_dt, additional_data, **kwargs) - self.assertAlmostEqual(result[0].data, expected) - - def different_projection(self, method, additional_data, expected, - **kwargs): - """Test that the plugin copes with non-lat/lon grids.""" - - trg_crs = None - src_crs = ccrs.PlateCarree() - trg_crs = ccrs.LambertConformal(central_longitude=50, - central_latitude=10) - trg_crs_iris = coord_systems.LambertConformal( - central_lon=50, central_lat=10) - lons = self.cubes[0].coord('longitude').points - lats = self.cubes[0].coord('latitude').points - x, y = [], [] - for lon, lat in zip(lons, lats): - x_trg, y_trg = trg_crs.transform_point(lon, lat, src_crs) - x.append(x_trg) - y.append(y_trg) - - new_x = AuxCoord(x, standard_name='projection_x_coordinate', - units='m', coord_system=trg_crs_iris) - new_y = AuxCoord(y, standard_name='projection_y_coordinate', - units='m', coord_system=trg_crs_iris) - - cube = Cube(self.cubes[0].data, - long_name="test_data", - dim_coords_and_dims=[(self.cubes[0].coord('time'), 0)], - aux_coords_and_dims=[(new_y, 1), (new_x, 2)], - units="1") - - cubes = CubeList() - cubes.append(cube) - - plugin = ExtractData(method) - result = plugin.process(cubes, self.sites, self.neighbour_list, - self.time_dt, additional_data, **kwargs) - - self.assertEqual(cubes[0].coord_system(), trg_crs_iris) - self.assertAlmostEqual(result[0].data, expected) - self.assertEqual(result[0].coord(axis='y').name(), 'latitude') - self.assertEqual(result[0].coord(axis='x').name(), 'longitude') - 
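# (The point checks below complete the projection test: the site's original
#  coordinates, latitude 4.74 and longitude 9.47, should be returned
#  unchanged after the round trip through the Lambert Conformal grid.)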
self.assertAlmostEqual(result[0].coord(axis='y').points, 4.74)
-        self.assertAlmostEqual(result[0].coord(axis='x').points, 9.47)
-
-    def missing_ancillary_data(self, method, additional_data, **kwargs):
-        """Test that the plugin copes with missing ancillary data."""
-        plugin = ExtractData(method)
-        msg = "Ancillary data"
-        with self.assertRaisesRegexp(Exception, msg):
-            plugin.process(
-                self.cubes, self.sites, self.neighbour_list,
-                self.time_dt, additional_data, **kwargs)
-
-    def missing_additional_data(self, method, additional_data, **kwargs):
-        """Test that the plugin copes with missing additional data."""
-        plugin = ExtractData(method)
-        msg = "Required additional data is unset"
-        with self.assertRaisesRegexp(Exception, msg):
-            plugin.process(
-                self.cubes, self.sites, self.neighbour_list,
-                self.time_dt, additional_data, **kwargs)
-
-
-class use_nearest(TestExtractData):
-
-    method = 'use_nearest'
-
-    def test_return_type(self):
-        self.return_type(self.method, None, ancillary_data=None)
-
-    def test_extracted_value(self):
-        """Test that the plugin returns the correct value."""
-        expected = 20
-        self.extracted_value(self.method, None, expected, ancillary_data=None)
-
-    def test_different_projection(self):
-        """Test that the plugin copes with non-lat/lon grids."""
-        expected = 20.
-        self.different_projection(self.method, None, expected,
-                                  ancillary_data=None)
-
-
-class orography_derived_temperature_lapse_rate(TestExtractData):
-
-    method = 'orography_derived_temperature_lapse_rate'
-
-    def test_return_type(self):
-        self.return_type(self.method, None, ancillary_data=self.ancillary_data)
-
-    def test_extracted_value(self):
-        """
-        Test that the plugin returns the correct value.
-
-        Fit line given data above is: T = 0.15*altitude + 19
-        The site defined in setUp has altitude=10, so the expected T = 20.5.
-
-        """
-        expected = 20.5
-        self.extracted_value(self.method, None, expected,
-                             ancillary_data=self.ancillary_data)
-
-    def test_different_projection(self):
-        """
-        Test that the plugin copes with non-lat/lon grids.
-
-        Cube is transformed into a LambertConformal projection. The usual
-        latitude/longitude coordinates are used to query the grid, with iris
-        functionality used to convert the query coordinates to the correct
-        projection.
-
-        The returned cube has latitude/longitude dimensions.
-
-        The expected value should be the same as the PlateCarree() projection
-        case above.
-
-        """
-        expected = 20.5
-        self.different_projection(self.method, None, expected,
-                                  ancillary_data=self.ancillary_data)
-
-    def test_missing_ancillary_data(self):
-        self.missing_ancillary_data(self.method, None, ancillary_data=None)
-
-
-class model_level_temperature_lapse_rate(TestExtractData):
-
-    method = 'model_level_temperature_lapse_rate'
-
-    def test_return_type(self):
-        self.return_type(self.method, self.ad,
-                         ancillary_data=self.ancillary_data)
-
-    def test_extracted_value(self):
-        """
-        Test that the plugin returns the correct value.
-
-        Site set to be 60m in altitude, which is a dz of +50m from the nearest
-        grid point (its neighbour). As such it should fall on the 900hPa level
-        and get a temperature of -20C.
-
-        """
-        self.sites['100']['altitude'] = 60.
-        self.neighbour_list['dz'] = 50.
-        expected = -20.
-        self.extracted_value(self.method, self.ad, expected,
-                             ancillary_data=self.ancillary_data)
-
-    def test_different_projection(self):
-        """
-        Test that the plugin copes with non-lat/lon grids.
-
-        Cube is transformed into a LambertConformal projection.
The usual - latitude/longitude coordinates are used to query the grid, with iris - functionality used to convert the query coordinates to the correct - projection. - - The returned cube has latitude/longitude dimensions. - - The expected value should be the same as the PlateCarree() projection - case above. - - """ - self.sites['100']['altitude'] = 60. - self.neighbour_list['dz'] = 50. - expected = -20. - self.different_projection(self.method, self.ad, expected, - ancillary_data=self.ancillary_data) - - def test_missing_additional_data(self): - """ - Test for appropriate error message when required additional - diagnostics are unavailable. - - """ - self.missing_additional_data(self.method, None, - ancillary_data=self.ancillary_data) - - -if __name__ == '__main__': - unittest.main() diff --git a/lib/improver/spotdata/times.py b/lib/improver/spotdata/times.py deleted file mode 100644 index 144fc0ff54..0000000000 --- a/lib/improver/spotdata/times.py +++ /dev/null @@ -1,74 +0,0 @@ -# -*- coding: utf-8 -*- -# ----------------------------------------------------------------------------- -# (C) British Crown Copyright 2017 Met Office. -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this -# list of conditions and the following disclaimer. -# -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# -# * Neither the name of the copyright holder nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE -# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. - -""" -Plugins written for the Improver site specific process chain. - -""" - -from datetime import datetime as dt -from datetime import time -from datetime import timedelta - - -def get_forecast_times(forecast_date=None, forecast_time=None, - forecast_length=None): - ''' - Generate a list of python datetime objects specifying the desired forecast - times. This list will be created from input specifications if provided. - Otherwise defaults are to start today at the most recent 6-hourly interval - (00, 06, 12, 18) and to run out to T+144 hours. 
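-    For example, a sketch of the default behaviour described above (an
-    illustration, not captured output): with no arguments supplied at 11Z
-    on 27th March 2017, the start falls back to 06Z that day, giving

-        get_forecast_times()
-        # -> [datetime(2017, 3, 27, 6, 0), datetime(2017, 3, 27, 7, 0),
-        #     ...]  hourly steps out to T+48, then 3-hourly towards T+144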
-
-    '''
-    if forecast_date is not None:
-        start_date = dt.strptime(forecast_date, "%Y%m%d").date()
-    else:
-        start_date = dt.utcnow().date()
-
-    if forecast_time is not None:
-        forecast_start_time = dt.combine(start_date, time(forecast_time))
-    else:
-        # If no start hour provided, go back to the nearest multiple of 6
-        # hours (e.g. utcnow = 11Z --> 06Z).
-        forecast_start_time = dt.combine(
-            start_date, time(divmod(dt.utcnow().hour, 6)[0]*6))
-
-    if forecast_length is None:
-        forecast_length = 144
-
-    # Generate forecast times. Hourly to T+48, 3 hourly to T+144.
-    forecast_times = [forecast_start_time + timedelta(hours=x) for x in
-                      range(min(forecast_length, 49))]
-    forecast_times = (forecast_times +
-                      [forecast_start_time + timedelta(hours=x) for x in
-                       range(51, min(forecast_length, 144) + 1, 3)])
-
-    return forecast_times
diff --git a/lib/improver/spotdata/write_output.py b/lib/improver/spotdata/write_output.py
deleted file mode 100644
index 64459c269c..0000000000
--- a/lib/improver/spotdata/write_output.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# -*- coding: utf-8 -*-
-# -----------------------------------------------------------------------------
-# (C) British Crown Copyright 2017 Met Office.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of the copyright holder nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-"""Plugins written for the Improver site specific process chain."""
-
-import os
-from iris import FUTURE
-
-FUTURE.netcdf_no_unlimited = True
-
-
-class WriteOutput(object):
-    ''' Writes diagnostic cube data in a format determined by the method.'''
-
-    def __init__(self, method):
-        '''
-        Select the method (format) for writing out the data cubes.
-
-        Args:
-        -----
-        method : String that sets the method.
-
-        '''
-        self.method = method
-        self.dir_path = os.path.dirname(os.path.realpath(__file__))
-
-    def process(self, cube):
-        '''Call the required method'''
-        function = getattr(self, self.method)
-        function(cube)
-
-    def as_netcdf(self, cube, path=None):
-        '''
-        Writes iris.cube.Cube data out to netCDF format files.
-
-        Args:
-        -----
-        cube : iris.cube.Cube diagnostic data cube.
-        path : Optional string setting the output path for the file.
-
-        Returns:
-        --------
-        Nil. Writes out file to filepath or working directory.
-
-        '''
-        from iris.fileformats.netcdf import Saver
-        if path is None:
-            path = self.dir_path
-        with Saver('{}/{}.nc'.format(path, cube.name()), 'NETCDF4') as output:
-            output.write(cube)

From 6c12d8c275fdfd9a605621dc3e8620b6c71ad1ef Mon Sep 17 00:00:00 2001
From: "benjamin.ayliffe"
Date: Thu, 25 May 2017 09:22:34 +0100
Subject: [PATCH 0041/1367] Removed CLI component.

---
 bin/improver-spotdata | 115 ------------------------------------------
 1 file changed, 115 deletions(-)
 delete mode 100755 bin/improver-spotdata

diff --git a/bin/improver-spotdata b/bin/improver-spotdata
deleted file mode 100755
index a5378942b5..0000000000
--- a/bin/improver-spotdata
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-# -----------------------------------------------------------------------------
-# (C) British Crown Copyright 2017 Met Office.
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright notice, this
-#   list of conditions and the following disclaimer.
-#
-# * Redistributions in binary form must reproduce the above copyright notice,
-#   this list of conditions and the following disclaimer in the documentation
-#   and/or other materials provided with the distribution.
-#
-# * Neither the name of the copyright holder nor the names of its
-#   contributors may be used to endorse or promote products derived from
-#   this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
-# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-# POSSIBILITY OF SUCH DAMAGE.
-"""Script to run spotdata extraction."""
-import argparse
-
-import iris
-
-from improver.spotdata.framework import run_framework
-
-def valid_latitude(value):
-    value = float(value)
-    if value < -90 or value > 90:
-        raise argparse.ArgumentTypeError("{} not in range [-90,90]".format(value))
-    return value
-
-def valid_longitude(value):
-    value = float(value)
-    if value < -180 or value > 180:
-        raise argparse.ArgumentTypeError("{} not in range [-180,180]".format(value))
-    return value
-
-
-def main():
-    """Load in arguments and start spotdata process."""
-    parser = argparse.ArgumentParser(
-        description='SpotData : A configurable tool to extract spot-data '
-                    'from gridded diagnostics. The method of interpolating '
-                    'and adjusting the resulting data can be set by choosing '
-                    'a suitable configuration.')
-
-    parser.add_argument('config_name',
-                        help='SpotData configuration to use, defining which '
-                             'diagnostics to produce and how to interpolate '
-                             'and tweak the data.'
- ) - parser.add_argument('data_path', type=str, - help='Path to diagnostic data files.' - ) - parser.add_argument('ancillary_path', type=str, - help='Path to ancillary (time invariant) data files.' - ) - parser.add_argument('--site_path', type=str, - help='Path to site data file if this is being used ' - 'to choose sites.' - ) - parser.add_argument('--latitudes', type=valid_latitude, metavar='(-90,90)', - nargs='+', - help='List of latitudes of sites of interest.' - ) - parser.add_argument('--longitudes', type=valid_longitude, metavar='(-180,180)', - nargs='+', - help='List of longitudes of sites of interest.' - ) - parser.add_argument('--altitudes', type=float, nargs='+', - help='Altitudes of sites of interest.' - ) - parser.add_argument('--site_ids', type=float, nargs='+', - help='ID numbers for sites can be set if desired.' - ) - parser.add_argument('--start_date', type=str, - help='Start date of forecast in format YYYYMMDD ' - '(e.g. 20170327 = 27th March 2017).' - ) - parser.add_argument('--start_time', type=int, - help='Starting hour of forecast in 24hr clock. ' - '(e.g. 3 = 03Z, 14 = 14Z).' - ) - parser.add_argument('--length', type=int, - help='Length of forecast in hours.' - ) - parser.add_argument('--multiprocess', type=bool, - help='Process diagnostics using multiprocessing.' - ) - - args = parser.parse_args() - - run_framework(args.config_name, args.data_path, args.ancillary_path, - site_path=args.site_path, latitudes=args.latitudes, - longitudes=args.longitudes, altitudes=args.altitudes, - site_ids=args.site_ids, - forecast_date=args.start_date, forecast_time=args.start_time, - forecast_length=args.length, - use_multiprocessing=args.multiprocess) - -if __name__ == "__main__": - main() From fc6e99f270fd9eec466d3c12bfde085ac674efd7 Mon Sep 17 00:00:00 2001 From: Caroline Jones Date: Thu, 25 May 2017 10:36:19 +0100 Subject: [PATCH 0042/1367] Adding changes to address problems raised in reviews --- ...est_weighted_blend_BasicWeightedAverage.py | 30 ++++++++++++------- lib/improver/weighted_blend.py | 18 +++++------ 2 files changed, 29 insertions(+), 19 deletions(-) diff --git a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py index de426174f3..1f013a5544 100644 --- a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py +++ b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py @@ -44,6 +44,17 @@ from improver.weighted_blend import BasicWeightedAverage +def example_coord_adjust(pnts): + """ Example function for coord_adjust. + A Function to apply to the coordinate after + collapsing the cube to correct the values. 
+
+    Args:
+        pnts : numpy.ndarray
+    """
+    return pnts[len(pnts)-1]
+
+
 class TestBasicWeightedAverage(IrisTest):
 
     """Test the Basic Weighted Average plugin."""
@@ -80,7 +91,7 @@ def test_basic(self):
         self.assertIsInstance(result, Cube)
 
     def test_fails_coord_not_in_cube(self):
-        """Test it Raises a Value Error if coord not in the cube"""
+        """Test it raises a ValueError if coord not in the cube."""
         coord = "notset"
         plugin = BasicWeightedAverage(coord)
         msg = ('The coord for this plugin must be ' +
@@ -89,7 +100,7 @@ def test_fails_coord_not_in_cube(self):
             plugin.process(self.cube)
 
     def test_fails_input_not_a_cube(self):
-        """Test it Raises a Value Error if not supplied with a cube"""
+        """Test it raises a ValueError if not supplied with a cube."""
         coord = "time"
         plugin = BasicWeightedAverage(coord)
         notacube = 0.0
@@ -100,7 +111,7 @@ def test_fails_input_not_a_cube(self):
 
     def test_fails_weights_shape(self):
         """Test it raises a ValueError if weights shape does not match
-           coord shape"""
+           coord shape."""
         coord = "time"
         plugin = BasicWeightedAverage(coord)
         weights = [0.1, 0.2, 0.7]
@@ -110,15 +121,15 @@ def test_fails_weights_shape(self):
             plugin.process(self.cube, weights)
 
     def test_coord_adjust_set(self):
-        """Test it works with coord adjust set"""
+        """Test it works with coord adjust set."""
         coord = "time"
-        coord_adjust = lambda pnts: pnts[len(pnts)-1]
+        coord_adjust = example_coord_adjust
         plugin = BasicWeightedAverage(coord, coord_adjust)
         result = plugin.process(self.cube)
         self.assertAlmostEquals(result.coord(coord).points, [402193.5])
 
     def test_scalar_coord(self):
-        """Test it works on scalar coord"""
+        """Test it works on scalar coord."""
         coord = "dummy_scalar_coord"
         plugin = BasicWeightedAverage(coord)
         weights = np.array([1.0])
@@ -130,11 +141,10 @@ def test_scalar_coord(self):
             warning_msg = "Could not find collapse dimension"
             self.assertTrue(any(warning_msg in str(item)
                                 for item in warning_list))
-        print warning_list
         self.assertArrayAlmostEqual(result.data, self.cube.data)
 
     def test_weights_equal_none(self):
-        """Test it works with weights set to None"""
+        """Test it works with weights set to None."""
         coord = "time"
         plugin = BasicWeightedAverage(coord)
         weights = None
@@ -143,7 +153,7 @@ def test_weights_equal_none(self):
         self.assertArrayAlmostEqual(result.data, expected_result_array)
 
     def test_weights_equal_list(self):
-        """Test it work with weights set to list [0.2, 0.8]"""
+        """Test it works with weights set to list [0.2, 0.8]."""
         coord = "time"
         plugin = BasicWeightedAverage(coord)
         weights = [0.2, 0.8]
@@ -152,7 +162,7 @@ def test_weights_equal_list(self):
         self.assertArrayAlmostEqual(result.data, expected_result_array)
 
     def test_weights_equal_array(self):
-        """Test it works with weights set to array (0.8, 0.2)"""
+        """Test it works with weights set to array (0.8, 0.2)."""
        coord = "time"
         plugin = BasicWeightedAverage(coord)
         weights = np.array([0.8, 0.2])
diff --git a/lib/improver/weighted_blend.py b/lib/improver/weighted_blend.py
index a938950bb7..d772bec0eb 100644
--- a/lib/improver/weighted_blend.py
+++ b/lib/improver/weighted_blend.py
@@ -43,12 +43,12 @@ def __init__(self, coord, coord_adjust=None):
 
         Args:
             coord : string
-                The name/s of a coordinate dimension/s in the cube
+                The name of a coordinate dimension in the cube.
             coord_adjust : Function to apply to the coordinate after
-                       collapsing the cube to correct the values
+                       collapsing the cube to correct the values,
                        for example for time windowing and cycle averaging the
                        following function would
-                       adjust the time coordinates
+                       adjust the time coordinates.
                        e.g. coord_adjust = lambda pnts: pnts[len(pnts)/2]
         """
         self.coord = coord
@@ -75,12 +75,12 @@ def process(self, cube, weights=None):
         if not isinstance(cube, iris.cube.Cube):
             raise ValueError('The first argument must be an instance of ' +
                              'iris.cube.Cube but is' +
-                             ' {0:s}'.format(type(cube)))
+                             ' {0:s}.'.format(type(cube)))
         if not cube.coords(self.coord):
             raise ValueError('The coord for this plugin must be ' +
-                             'an existing coordinate in the input cube')
+                             'an existing coordinate in the input cube.')
         # Find the coords dimension.
-        # If coord is a scalar_coord try adding it
+        # If coord is a scalar_coord try adding it.
         collapse_dim = cube.coord_dims(self.coord)
         if not collapse_dim:
             msg = ('Could not find collapse dimension, ' +
@@ -88,7 +88,7 @@ def process(self, cube, weights=None):
             warnings.warn(msg)
             cube = iris.util.new_axis(cube, self.coord)
             collapse_dim = cube.coord_dims(self.coord)
-        # supply weights as an array of weights whose shape matches the cube
+        # Supply weights as an array of weights whose shape matches the cube.
         weights_array = None
         if weights is not None:
             if np.array(weights).shape != cube.coord(self.coord).points.shape:
@@ -97,10 +97,10 @@ def process(self, cube, weights=None):
             weights_array = iris.util.broadcast_to_shape(np.array(weights),
                                                          cube.shape,
                                                          collapse_dim)
-        # Calculate the weighted average
+        # Calculate the weighted average.
         result = cube.collapsed(self.coord,
                                 iris.analysis.MEAN, weights=weights_array)
-        # if set adjust values of collapsed coordinates
+        # If set adjust values of collapsed coordinates.
         if self.coord_adjust is not None:
             for crd in result.coords():
                 if cube.coord_dims(crd.name()) == collapse_dim:

From 5befdfa90669238cd7fb87b6b42eb6e3a24fad30 Mon Sep 17 00:00:00 2001
From: Aaron Hopkinson
Date: Thu, 25 May 2017 10:45:47 +0100
Subject: [PATCH 0043/1367] #12/13: Test/implement Sphinx for autogenerated
 documentation (#62)

* Clean sphinx config file
* Added napoleon plugin and added lib/improver to path for Sphinx
* Test for apidoc
* Sphinx config ignore tests
* Sphinx pointed to right dir
* Doc index contains link to improver package - easier to do things this way
* Default theme stuff for Sphinx
---
 doc/Makefile         | 229 ++++++++++++++++++++++++++++
 doc/source/conf.py   | 354 +++++++++++++++++++++++++++++++++++++++++++
 doc/source/index.rst |  19 +++
 3 files changed, 602 insertions(+)
 create mode 100644 doc/Makefile
 create mode 100644 doc/source/conf.py
 create mode 100644 doc/source/index.rst

diff --git a/doc/Makefile b/doc/Makefile
new file mode 100644
index 0000000000..ff70b70f3a
--- /dev/null
+++ b/doc/Makefile
@@ -0,0 +1,229 @@
+# Makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line.
+SPHINXOPTS    =
+SPHINXBUILD   = sphinx-build
+PAPER         =
+BUILDDIR      = build
+
+# Internal variables.
+PAPEROPT_a4     = -D latex_paper_size=a4
+PAPEROPT_letter = -D latex_paper_size=letter
+ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+# the i18n builder cannot share the environment and doctrees with the others
+I18NSPHINXOPTS  = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+
+.PHONY: help
+help:
+	@echo "Please use \`make <target>' where <target> is one of"
+	@echo "  html       to make standalone HTML files"
+	@echo "  dirhtml    to make HTML files named index.html in directories"
+	@echo "  singlehtml to make a single large HTML file"
+	@echo "  pickle     to make pickle files"
+	@echo "  json       to make JSON files"
+	@echo "  htmlhelp   to make HTML files and a HTML help project"
+	@echo "  qthelp     to make HTML files and a qthelp project"
+	@echo "  applehelp  to make an Apple Help Book"
+	@echo "  devhelp    to make HTML files and a Devhelp project"
+	@echo "  epub       to make an epub"
+	@echo "  epub3      to make an epub3"
+	@echo "  latex      to make LaTeX files, you can set PAPER=a4 or PAPER=letter"
+	@echo "  latexpdf   to make LaTeX files and run them through pdflatex"
+	@echo "  latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
+	@echo "  text       to make text files"
+	@echo "  man        to make manual pages"
+	@echo "  texinfo    to make Texinfo files"
+	@echo "  info       to make Texinfo files and run them through makeinfo"
+	@echo "  gettext    to make PO message catalogs"
+	@echo "  changes    to make an overview of all changed/added/deprecated items"
+	@echo "  xml        to make Docutils-native XML files"
+	@echo "  pseudoxml  to make pseudoxml-XML files for display purposes"
+	@echo "  linkcheck  to check all external links for integrity"
+	@echo "  doctest    to run all doctests embedded in the documentation (if enabled)"
+	@echo "  coverage   to run coverage check of the documentation (if enabled)"
+	@echo "  dummy      to check syntax errors of document sources"
+
+.PHONY: clean
+clean:
+	rm -rf $(BUILDDIR)/*
+
+.PHONY: apidoc
+apidoc:
+	sphinx-apidoc -e -P -f -o source ../lib/improver ../lib/improver/tests
+
+.PHONY: html
+html:
+	$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/html."
+
+.PHONY: dirhtml
+dirhtml:
+	$(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml
+	@echo
+	@echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml."
+
+.PHONY: singlehtml
+singlehtml:
+	$(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml
+	@echo
+	@echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml."
+
+.PHONY: pickle
+pickle:
+	$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle
+	@echo
+	@echo "Build finished; now you can process the pickle files."
+
+.PHONY: json
+json:
+	$(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json
+	@echo
+	@echo "Build finished; now you can process the JSON files."
+
+.PHONY: htmlhelp
+htmlhelp:
+	$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp
+	@echo
+	@echo "Build finished; now you can run HTML Help Workshop with the" \
+	      ".hhp project file in $(BUILDDIR)/htmlhelp."
+ +.PHONY: qthelp +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Improver.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Improver.qhc" + +.PHONY: applehelp +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +.PHONY: devhelp +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." + @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/Improver" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Improver" + @echo "# devhelp" + +.PHONY: epub +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +.PHONY: epub3 +epub3: + $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 + @echo + @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." + +.PHONY: latex +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +.PHONY: latexpdf +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: latexpdfja +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: text +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +.PHONY: man +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +.PHONY: texinfo +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +.PHONY: info +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +.PHONY: gettext +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +.PHONY: changes +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." 
+ +.PHONY: linkcheck +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +.PHONY: doctest +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +.PHONY: coverage +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +.PHONY: xml +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +.PHONY: pseudoxml +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." + +.PHONY: dummy +dummy: + $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy + @echo + @echo "Build finished. Dummy builder generates no files." diff --git a/doc/source/conf.py b/doc/source/conf.py new file mode 100644 index 0000000000..3b9b74ca40 --- /dev/null +++ b/doc/source/conf.py @@ -0,0 +1,354 @@ +# -*- coding: utf-8 -*- +# +# Improver documentation build configuration file, created by +# sphinx-quickstart on Fri May 19 13:27:21 2017. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. + +import os +import sys +sys.path.insert(0, os.path.abspath('../../lib/')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.napoleon', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.coverage', + 'sphinx.ext.imgmath', + 'sphinx.ext.ifconfig', + 'sphinx.ext.viewcode', +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The encoding of source files. +# +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = u'Improver' +copyright = u'2017, Met Office' +author = u'Met Office' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +version = u'1.0' +# The full version, including alpha/beta/rc tags. +release = u'1.0' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = None
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+#
+# today = ''
+#
+# Else, today_fmt is used as the format for a strftime call.
+#
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# These patterns also affect html_static_path and html_extra_path
+exclude_patterns = []
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+#
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+#
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+#
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+#
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = 'sphinx'
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = False
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'alabaster'
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further.  For a list of options available for each theme, see the
+# documentation.
+#
+html_theme_options = {
+'font_size': '16px', # slightly smaller (default 17px)
+'page_width': '1080px' # so 80 chars of code fit (default 940px)
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents.
+# "<project> v<release> documentation" by default.
+#
+# html_title = u'Improver v1.0'
+
+# A shorter title for the navigation bar.  Default is the same as html_title.
+#
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+#
+# html_logo = None
+
+# The name of an image file (relative to this directory) to use as a favicon of
+# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+#
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+#
+# html_extra_path = []
+
+# If not None, a 'Last updated on:' timestamp is inserted at every page
+# bottom, using the given strftime format.
+# The empty string is equivalent to '%b %d, %Y'.
+#
+# html_last_updated_fmt = None
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+#
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+#
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+#
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+#
+# html_domain_indices = True
+
+# If false, no index is generated.
+#
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+#
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+#
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+#
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+#
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a <link> tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+#
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+#   'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+#   'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = 'Improverdoc'
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #
+    # 'papersize': 'letterpaper',
+
+    # The font size ('10pt', '11pt' or '12pt').
+    #
+    # 'pointsize': '10pt',
+
+    # Additional stuff for the LaTeX preamble.
+    #
+    # 'preamble': '',
+
+    # Latex figure (float) alignment
+    #
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, 'Improver.tex', u'Improver Documentation',
+     u'Met Office', 'manual'),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# If false, will not define \strong, \code, \titleref, \crossref ... but only
+# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
+# packages.
+#
+# latex_keep_old_macro_names = True
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page.
List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'improver', u'Improver Documentation', + [author], 1) +] + +# If true, show URL addresses after external links. +# +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'Improver', u'Improver Documentation', + author, 'Improver', 'One line description of project.', + 'Miscellaneous'), +] + +# Documents to append as an appendix to all manuals. +# +# texinfo_appendices = [] + +# If false, no module index is generated. +# +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = {'https://docs.python.org/': None} diff --git a/doc/source/index.rst b/doc/source/index.rst new file mode 100644 index 0000000000..f95d10a808 --- /dev/null +++ b/doc/source/index.rst @@ -0,0 +1,19 @@ +.. Improver documentation master file, created by + sphinx-quickstart on Fri May 19 13:27:21 2017. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Improver's documentation! +==================================== + +This should contain a link to the API documentation. +See :doc:`improver` + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + From ef542ded899a1013a7306a417049eaaacb6cf41f Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 13:22:13 +0100 Subject: [PATCH 0044/1367] Sphinx: Napoleon to document constructor methods --- doc/source/conf.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/source/conf.py b/doc/source/conf.py index 3b9b74ca40..4fc2f9dfd8 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -352,3 +352,6 @@ # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'https://docs.python.org/': None} + +# Get napoleon to document constructor methods. 
+napoleon_include_init_with_doc = True From 058d1395122aa252fe63412fa9ebfa0d540dcf9a Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 13:27:38 +0100 Subject: [PATCH 0045/1367] Run apidoc to generate rst files --- ...emble_calibration.ensemble_calibration.rst | 7 +++++ ...bration.ensemble_calibration_utilities.rst | 7 +++++ doc/source/improver.ensemble_calibration.rst | 18 +++++++++++++ doc/source/improver.grids.osgb.rst | 7 +++++ doc/source/improver.grids.rst | 17 ++++++++++++ doc/source/improver.nbhood.rst | 7 +++++ doc/source/improver.rst | 26 +++++++++++++++++++ doc/source/improver.threshold.rst | 7 +++++ doc/source/modules.rst | 7 +++++ 9 files changed, 103 insertions(+) create mode 100644 doc/source/improver.ensemble_calibration.ensemble_calibration.rst create mode 100644 doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst create mode 100644 doc/source/improver.ensemble_calibration.rst create mode 100644 doc/source/improver.grids.osgb.rst create mode 100644 doc/source/improver.grids.rst create mode 100644 doc/source/improver.nbhood.rst create mode 100644 doc/source/improver.rst create mode 100644 doc/source/improver.threshold.rst create mode 100644 doc/source/modules.rst diff --git a/doc/source/improver.ensemble_calibration.ensemble_calibration.rst b/doc/source/improver.ensemble_calibration.ensemble_calibration.rst new file mode 100644 index 0000000000..0c3378a74b --- /dev/null +++ b/doc/source/improver.ensemble_calibration.ensemble_calibration.rst @@ -0,0 +1,7 @@ +improver.ensemble_calibration.ensemble_calibration module +========================================================= + +.. automodule:: improver.ensemble_calibration.ensemble_calibration + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst b/doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst new file mode 100644 index 0000000000..bbd19c07ad --- /dev/null +++ b/doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst @@ -0,0 +1,7 @@ +improver.ensemble_calibration.ensemble_calibration_utilities module +=================================================================== + +.. automodule:: improver.ensemble_calibration.ensemble_calibration_utilities + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/improver.ensemble_calibration.rst b/doc/source/improver.ensemble_calibration.rst new file mode 100644 index 0000000000..05100adf26 --- /dev/null +++ b/doc/source/improver.ensemble_calibration.rst @@ -0,0 +1,18 @@ +improver.ensemble_calibration package +===================================== + +Submodules +---------- + +.. toctree:: + + improver.ensemble_calibration.ensemble_calibration + improver.ensemble_calibration.ensemble_calibration_utilities + +Module contents +--------------- + +.. automodule:: improver.ensemble_calibration + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/improver.grids.osgb.rst b/doc/source/improver.grids.osgb.rst new file mode 100644 index 0000000000..6733f3c434 --- /dev/null +++ b/doc/source/improver.grids.osgb.rst @@ -0,0 +1,7 @@ +improver.grids.osgb module +========================== + +.. 
automodule:: improver.grids.osgb + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/improver.grids.rst b/doc/source/improver.grids.rst new file mode 100644 index 0000000000..033f58985d --- /dev/null +++ b/doc/source/improver.grids.rst @@ -0,0 +1,17 @@ +improver.grids package +====================== + +Submodules +---------- + +.. toctree:: + + improver.grids.osgb + +Module contents +--------------- + +.. automodule:: improver.grids + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/improver.nbhood.rst b/doc/source/improver.nbhood.rst new file mode 100644 index 0000000000..8e19c54135 --- /dev/null +++ b/doc/source/improver.nbhood.rst @@ -0,0 +1,7 @@ +improver.nbhood module +====================== + +.. automodule:: improver.nbhood + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/improver.rst b/doc/source/improver.rst new file mode 100644 index 0000000000..eded451f1d --- /dev/null +++ b/doc/source/improver.rst @@ -0,0 +1,26 @@ +improver package +================ + +Subpackages +----------- + +.. toctree:: + + improver.ensemble_calibration + improver.grids + +Submodules +---------- + +.. toctree:: + + improver.nbhood + improver.threshold + +Module contents +--------------- + +.. automodule:: improver + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/improver.threshold.rst b/doc/source/improver.threshold.rst new file mode 100644 index 0000000000..708c200fba --- /dev/null +++ b/doc/source/improver.threshold.rst @@ -0,0 +1,7 @@ +improver.threshold module +========================= + +.. automodule:: improver.threshold + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/source/modules.rst b/doc/source/modules.rst new file mode 100644 index 0000000000..1a1bcf8da2 --- /dev/null +++ b/doc/source/modules.rst @@ -0,0 +1,7 @@ +improver +======== + +.. toctree:: + :maxdepth: 4 + + improver From 99474db2936795c20e7b02d65ee3cf101e134c37 Mon Sep 17 00:00:00 2001 From: Caroline Jones Date: Thu, 25 May 2017 14:29:18 +0100 Subject: [PATCH 0046/1367] An initial draft of the weights function --- lib/improver/weights.py | 220 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 220 insertions(+) create mode 100644 lib/improver/weights.py diff --git a/lib/improver/weights.py b/lib/improver/weights.py new file mode 100644 index 0000000000..3916eabb98 --- /dev/null +++ b/lib/improver/weights.py @@ -0,0 +1,220 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Module to create the weights used to blend data."""
+
+
+import numpy as np
+import iris
+
+
+def normalise_weights(weights):
+    """Ensures all weights add up to one.
+
+        Args:
+            weights : array of weights.
+
+        Returns:
+            normalised_weights : array of weights where
+                                 sum = 1.0.
+
+    """
+    sumval = weights.sum()
+    if sumval == 0:
+        raise ValueError('Sum of weights must be > 0.0')
+
+    normalised_weights = weights / sumval
+    return normalised_weights
+
+
+def nonlinear_weights(num_of_weights, cval):
+    """Create nonlinear weights.
+
+        Args:
+            num_of_weights : Positive Integer - Number of weights to create.
+
+            cval : Float - greater than 0.0 but less than or equal to 1.0,
+                   to be used for the nonlinear weights function.
+                   1.0 = equal weights for all.
+
+                   Weights will be calculated as
+                   cval**(tval-1)/Sum(of all weights),
+                   where tval runs from 1 to num_of_weights.
+
+        Returns:
+            weights : array of weights, sum of all weights = 1.0
+
+    """
+    if not isinstance(num_of_weights, int) or num_of_weights <= 0:
+        raise ValueError('Number of weights must be integer > 0')
+    if cval <= 0.0 or cval > 1.0:
+        raise ValueError('cval must be greater than 0.0 and less ' +
+                         'than or equal to 1.0')
+    weights_list = []
+    for tval_minus1 in range(0, num_of_weights):
+        weights_list.append(cval**(tval_minus1))
+
+    weights = normalise_weights(np.array(weights_list))
+
+    return weights
+
+
+def linear_weights(num_of_weights, y0val=1.0, slope=0.0,
+                   ynval=None):
+    """Create linear weights.
+
+        Args:
+            num_of_weights : Positive Integer - Number of weights to create.
+            y0val = float: relative value of starting point. Default = 1.0
+
+            AND EITHER:
+            slope = float: slope of the line. Default = 0.0 (equal weights)
+            OR
+            ynval = float or None: relative weight of last point.
+                    Default value is None
+
+        Returns:
+            weights : array of weights, sum of all weights = 1.0
+
+    """
+    if not isinstance(num_of_weights, int) or num_of_weights <= 0:
+        raise ValueError('Number of weights must be integer > 0')
+    if ynval is not None:
+        if slope != 0.0:
+            raise ValueError('Relative end weight or slope must be set' +
+                             ' but not both.')
+        else:
+            slope = (ynval - y0val)/(num_of_weights - 1.0)
+    weights_list = []
+    for tval in range(0, num_of_weights):
+        weights_list.append(slope*tval + y0val)
+
+    weights = normalise_weights(np.array(weights_list))
+
+    return weights
+
+
+class ChooseDefaultWeightsLinear(object):
+    """ Calculate Default Weights using Linear Function. """
+
+    def __init__(self, y0val=None, slope=0.0, ynval=None):
+        """Set up for calculating default weights using linear function.
+           y0val = None or float: relative value of starting point.
+           slope = float: slope of the line. Default = 0.0 (equal weights)
+           ynval = float or None: relative weight of last point.
+                   Default value is None
+
+           slope OR ynval should be set but NOT BOTH
+
+           If y0val is not set (or is set to None), the code assumes that
+           the ultimate default values of y0val = 20.0 and ynval = 2.0
+           are required.
+        """
+        self.slope = slope
+        self.ynval = ynval
+        if y0val is None:
+            self.y0val = 20.0
+            self.ynval = 2.0
+        else:
+            self.y0val = y0val
+
+    def process(self, cube, coord):
+        """Calculate weights for a given cube and coord.
+
+            Args:
+                cube : iris.cube.Cube
+                       Cube to blend across the coord.
+                coord : string
+                        The name of a coordinate dimension in the cube.
+            Returns:
+                weights : array of weights, sum of all weights = 1.0
+        """
+        if not isinstance(cube, iris.cube.Cube):
+            raise ValueError('The first argument must be an instance of ' +
+                             'iris.cube.Cube but is' +
+                             ' {0:s}'.format(type(cube)))
+        if not cube.coords(coord):
+            raise ValueError('The coord for this plugin must be ' +
+                             'an existing coordinate in the input cube')
+        num_of_weights = len(cube.coord(coord).points)
+
+        weights = linear_weights(num_of_weights, y0val=self.y0val,
+                                 slope=self.slope,
+                                 ynval=self.ynval)
+
+        return weights
+
+    def __repr__(self):
+        """Represent the configured plugin instance as a string."""
+        desc = '<

Date: Thu, 25 May 2017 14:34:30 +0100
Subject: [PATCH 0047/1367] Added conda and readthedocs scripts

---
 .readthedocs.yml |  2 ++
 environment.yml  | 39 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 41 insertions(+)
 create mode 100644 .readthedocs.yml
 create mode 100644 environment.yml

diff --git a/.readthedocs.yml b/.readthedocs.yml
new file mode 100644
index 0000000000..5d3b36c799
--- /dev/null
+++ b/.readthedocs.yml
@@ -0,0 +1,2 @@
+conda:
+  file: environment.yml
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000000..783b878973
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,39 @@
+# Taken from iris: https://github.com/SciTools/iris/
+# Use this file to create a conda environment using:
+# conda create -n <name> --file environment.yml
+
+iris
+
+# Mandatory dependencies
+biggus
+cartopy
+matplotlib<1.9
+netcdf4
+numpy
+pyke
+udunits2
+cf_units
+
+# Iris build dependencies
+setuptools
+
+# Iris testing/documentation dependencies
+mock
+nose
+pep8
+sphinx
+iris-sample-data
+filelock
+imagehash
+requests
+
+# Optional iris dependencies
+nc_time_axis
+iris-grib
+esmpy>=7.0
+gdal
-libmo_unpack -pandas -pyugrid -mo_pack -python-stratify +## Iris build dependencies +#setuptools +# +## Iris testing/documentation dependencies +#mock +#nose +#pep8 +#sphinx +#iris-sample-data +#filelock +#imagehash +#requests +# +## Optional iris dependencies +#nc_time_axis +#iris-grib +#esmpy>=7.0 +#gdal +#libmo_unpack +#pandas +#pyugrid +#mo_pack +##python-stratify From b3d46074a781947823f67313e70a743fdb018747 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 15:05:25 +0100 Subject: [PATCH 0049/1367] updated sphinx conf.py --- doc/source/conf.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index 4fc2f9dfd8..c24850a52c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -35,8 +35,6 @@ 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.coverage', - 'sphinx.ext.imgmath', - 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode', ] From d3b6af317f3fbc56997bf4eaeb3db0c14e900097 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 15:35:57 +0100 Subject: [PATCH 0050/1367] Sphinx: Conda environment test --- environment.yml | 48 ++++++++++++++---------------------------------- 1 file changed, 14 insertions(+), 34 deletions(-) diff --git a/environment.yml b/environment.yml index e56f0e4ee9..debc75399b 100644 --- a/environment.yml +++ b/environment.yml @@ -2,40 +2,20 @@ # Use this file to create a conda environment using: # conda create -n --file environment.yml -name: iris +name: improver channels: - conda-forge + - scitools dependencies: - - iris=1.11.* - - biggus - - cartopy - - matplotlib<1.9 - - netcdf4 - - numpy - - pyke - - udunits2 - - cf_units - -## Iris build dependencies -#setuptools -# -## Iris testing/documentation dependencies -#mock -#nose -#pep8 -#sphinx -#iris-sample-data -#filelock -#imagehash -#requests -# -## Optional iris dependencies -#nc_time_axis -#iris-grib -#esmpy>=7.0 -#gdal -#libmo_unpack -#pandas -#pyugrid -#mo_pack -##python-stratify + - sphinx + - iris: + - biggus + - cartopy + - matplotlib<1.9 + - netcdf4 + - numpy + - pyke + - udunits2 + - cf_units + - setuptools + - mo_pack From 5b222e7726e0b5910716b5cc62b5fb98a3c93c18 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 15:43:04 +0100 Subject: [PATCH 0051/1367] Sphinx: Conda environment test - reorder --- environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environment.yml b/environment.yml index debc75399b..404701f46b 100644 --- a/environment.yml +++ b/environment.yml @@ -4,8 +4,8 @@ name: improver channels: - - conda-forge - scitools + - conda-forge dependencies: - sphinx - iris: From 35dfa9ba2b8fd4a53d46f85b7f513fbdc466006c Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 15:46:59 +0100 Subject: [PATCH 0052/1367] Sphinx: Conda environment test - restructure --- environment.yml | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/environment.yml b/environment.yml index 404701f46b..1de39f9185 100644 --- a/environment.yml +++ b/environment.yml @@ -8,14 +8,14 @@ channels: - conda-forge dependencies: - sphinx - - iris: - - biggus - - cartopy - - matplotlib<1.9 - - netcdf4 - - numpy - - pyke - - udunits2 - - cf_units - - setuptools - - mo_pack + - iris + - biggus + - cartopy + - matplotlib<1.9 + - netcdf4 + - numpy + - pyke + - udunits2 + - cf_units + - setuptools + - mo_pack From fa62a44b8e3c537845a41d249a65aa8ccdcb6e21 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 15:48:52 +0100 Subject: [PATCH 
0053/1367] Sphinx: Conda environment: change channel --- environment.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/environment.yml b/environment.yml index 1de39f9185..09edba2ee2 100644 --- a/environment.yml +++ b/environment.yml @@ -4,7 +4,6 @@ name: improver channels: - - scitools - conda-forge dependencies: - sphinx From 396c69a4da0b001139b9177ee727d7874676902e Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 15:51:02 +0100 Subject: [PATCH 0054/1367] Sphinx: reset font size to default --- doc/source/conf.py | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index c24850a52c..f2a2aa585c 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -133,7 +133,6 @@ # documentation. # html_theme_options = { -'font_size': '16px', # slightly smaller (default 17px) 'page_width': '1080px' # so 80 chars of code fit (default 940px) } From 2a6d9b19feca800be3bd2b4cab28d1dd793fa3d7 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Thu, 25 May 2017 16:50:16 +0100 Subject: [PATCH 0055/1367] Change Sphinx theme to default --- doc/source/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index f2a2aa585c..8d715a781b 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -126,7 +126,7 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # -html_theme = 'alabaster' +html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the From 9a305adc5a52aa2113a837be036dc4cd832bcbf2 Mon Sep 17 00:00:00 2001 From: Ben Fitzpatrick Date: Fri, 26 May 2017 15:21:51 +0100 Subject: [PATCH 0056/1367] Update README.md Reviewed by Tomek at my desk --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 04567755b1..8d4adcf313 100644 --- a/README.md +++ b/README.md @@ -2,5 +2,6 @@ [![License](https://img.shields.io/badge/License-BSD%203--Clause-blue.svg)](https://opensource.org/licenses/BSD-3-Clause) [![Build Status](https://travis-ci.org/metoppv/improver.svg?branch=master)](https://travis-ci.org/metoppv/improver) +[![Codacy Badge](https://api.codacy.com/project/badge/Grade/62804cd1266246f4a04381805f3774f4)](https://www.codacy.com/app/metoppv/improver?utm_source=github.com&utm_medium=referral&utm_content=metoppv/improver&utm_campaign=Badge_Grade) IMPROVER is a library of algorithms for meteorological post-processing and verification. 
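Patches 0057 and 0059 below revise the weights.py module introduced above. As a reading aid, its two weighting schemes reduce to the following standalone sketch; this is illustrative Python only, the helper names are invented and it is not the plugin API:

    import numpy as np

    def linear_weights_sketch(n, y0val=20.0, ynval=2.0):
        # Weights fall on a straight line from y0val down to ynval and are
        # then normalised so that they sum to 1.0 (assumes n >= 2).
        slope = (ynval - y0val) / (n - 1.0)
        weights = np.array([slope * t + y0val for t in range(n)])
        return weights / weights.sum()

    def nonlinear_weights_sketch(n, cval=0.85):
        # Weights decay geometrically as cval**0, cval**1, ... and are then
        # normalised so that they sum to 1.0.
        weights = np.array([cval ** t for t in range(n)])
        return weights / weights.sum()

    # With the ultimate defaults (y0val=20.0, ynval=2.0) and n=2 the linear
    # scheme gives [0.90909091, 0.09090909]; the nonlinear scheme with
    # cval=0.85 gives [0.54054054, 0.45945946]. These are the values asserted
    # in the unit tests added by PATCH 0059 below.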
From 7395ee0e87f53d27af16b55380d897841d694ccf Mon Sep 17 00:00:00 2001
From: Caroline Jones
Date: Tue, 30 May 2017 08:16:12 +0100
Subject: [PATCH 0057/1367] Minor bug fix to weights.py

---
 lib/improver/weights.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/lib/improver/weights.py b/lib/improver/weights.py
index 3916eabb98..6d8b3cb22f 100644
--- a/lib/improver/weights.py
+++ b/lib/improver/weights.py
@@ -175,9 +175,9 @@ def __repr__(self):
         """Represent the configured plugin instance as a string."""
         desc = '<ChooseDefaultWeightsLinear ...>'

From: Caroline Jones
Date: Tue, 30 May 2017 08:52:32 +0100
Subject: [PATCH 0058/1367] More minor corrections to the code

---
 ...est_weighted_blend_BasicWeightedAverage.py | 12 ++++++---
 lib/improver/weighted_blend.py                | 26 ++++++++++++-------
 2 files changed, 25 insertions(+), 13 deletions(-)

diff --git a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py
index 1f013a5544..a2f42800a5 100644
--- a/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py
+++ b/lib/improver/tests/test_weighted_blend_BasicWeightedAverage.py
@@ -91,7 +91,7 @@ def test_basic(self):
         self.assertIsInstance(result, Cube)
 
     def test_fails_coord_not_in_cube(self):
-        """Test it Raises a Value Error if coord not in the cube."""
+        """Test it raises a Value Error if coord not in the cube."""
         coord = "notset"
         plugin = BasicWeightedAverage(coord)
         msg = ('The coord for this plugin must be ' +
@@ -100,7 +100,7 @@ def test_fails_coord_not_in_cube(self):
         plugin.process(self.cube)
 
     def test_fails_input_not_a_cube(self):
-        """Test it Raises a Value Error if not supplied with a cube."""
+        """Test it raises a Value Error if not supplied with a cube."""
         coord = "time"
         plugin = BasicWeightedAverage(coord)
         notacube = 0.0
@@ -110,7 +110,7 @@ def test_fails_input_not_a_cube(self):
         plugin.process(notacube)
 
     def test_fails_weights_shape(self):
-        """Test it Raises a Value Error if weights shape does not match
+        """Test it raises a Value Error if weights shape does not match
         coord shape."""
         coord = "time"
         plugin = BasicWeightedAverage(coord)
@@ -129,7 +129,11 @@ def test_coord_adjust_set(self):
         self.assertAlmostEquals(result.coord(coord).points, [402193.5])
 
     def test_scalar_coord(self):
-        """Test it works on scalar coord."""
+        """Test it works on scalar coordinate
+        and check that a warning has been raised
+        if the dimension that you want to blend on
+        is a scalar coordinate.
+ """ coord = "dummy_scalar_coord" plugin = BasicWeightedAverage(coord) weights = np.array([1.0]) diff --git a/lib/improver/weighted_blend.py b/lib/improver/weighted_blend.py index d772bec0eb..cf95ad12b3 100644 --- a/lib/improver/weighted_blend.py +++ b/lib/improver/weighted_blend.py @@ -60,7 +60,7 @@ def __repr__(self): '').format(self.coord) def process(self, cube, weights=None): - """Calculated weighted mean across the chosen coord + """Calculate weighted mean across the chosen coord Args: cube : iris.cube.Cube @@ -70,20 +70,23 @@ def process(self, cube, weights=None): Returns: result : iris.cube.Cube + containing the weighted mean across the chosen coord """ if not isinstance(cube, iris.cube.Cube): - raise ValueError('The first argument must be an instance of ' + - 'iris.cube.Cube but is' + - ' {0:s}.'.format(type(cube))) + msg = ('The first argument must be an instance of ' + 'iris.cube.Cube but is' + ' {0:s}.'.format(type(cube))) + raise ValueError(msg) if not cube.coords(self.coord): - raise ValueError('The coord for this plugin must be ' + - 'an existing coordinate in the input cube.') + msg = ('The coord for this plugin must be ' + 'an existing coordinate in the input cube.') + raise ValueError(msg) # Find the coords dimension. # If coord is a scalar_coord try adding it. collapse_dim = cube.coord_dims(self.coord) if not collapse_dim: - msg = ('Could not find collapse dimension, ' + + msg = ('Could not find collapse dimension, ' 'will try adding it') warnings.warn(msg) cube = iris.util.new_axis(cube, self.coord) @@ -92,8 +95,13 @@ def process(self, cube, weights=None): weights_array = None if weights is not None: if np.array(weights).shape != cube.coord(self.coord).points.shape: - raise ValueError('The weights array must match the shape ' + - 'of the coordinate in the input cube') + msg = ('The weights array must match the shape ' + 'of the coordinate in the input cube; ' + 'weight shape is ' + '{0:s}'.format(np.array(weights).shape) + + ', cube shape is ' + '{0:s}'.format(cube.coord(self.coord).points.shape)) + raise ValueError(msg) weights_array = iris.util.broadcast_to_shape(np.array(weights), cube.shape, collapse_dim) From 2cbb612a5adc0465459bc3bd78566c66e2a7109d Mon Sep 17 00:00:00 2001 From: Caroline Jones Date: Tue, 30 May 2017 13:32:38 +0100 Subject: [PATCH 0059/1367] Adding revised plugin to calculate weights and unit tests --- ...test_weights_ChooseDefaultWeightsLinear.py | 191 ++++++++++++++++++ ...t_weights_ChooseDefaultWeightsNonLinear.py | 172 ++++++++++++++++ lib/improver/weights.py | 77 ++++--- 3 files changed, 417 insertions(+), 23 deletions(-) create mode 100644 lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py create mode 100644 lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py diff --git a/lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py b/lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py new file mode 100644 index 0000000000..9db67e679d --- /dev/null +++ b/lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py @@ -0,0 +1,191 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Unit tests for the weights.ChooseDefaultWeightsLinear plugin."""
+
+
+import unittest
+
+from cf_units import Unit
+from iris.coords import AuxCoord, DimCoord
+from iris.cube import Cube
+from iris.tests import IrisTest
+import iris
+import numpy as np
+
+from improver.weights import ChooseDefaultWeightsLinear as LinearWeights
+
+
+def add_realizations(cube, num):
+    """ Create num realizations of input cube
+        Args:
+            cube = iris.cube.Cube - input cube
+            num = integer - Number of realizations
+        Returns:
+            cubeout = iris.cube.Cube - copy of cube with num realizations added
+    """
+    cubelist = iris.cube.CubeList()
+    for i in range(0, num):
+        newcube = cube.copy()
+        new_ensemble_coord = iris.coords.AuxCoord(i,
+                                                  standard_name='realization')
+        newcube.add_aux_coord(new_ensemble_coord)
+        cubelist.append(newcube)
+    cubeout = cubelist.merge_cube()
+    return cubeout
+
+
+class TestChooseDefaultWeightsLinear(IrisTest):
+    """ Test the Default Linear Weights plugin """
+
+    def setUp(self):
+        data = np.zeros((2, 2, 2))
+        data[0][:][:] = 0.0
+        data[1][:][:] = 1.0
+        cube = Cube(data, standard_name="precipitation_amount",
+                    units="kg m^-2 s^-1")
+        cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, 2), 'latitude',
+                                    units='degrees'), 1)
+        cube.add_dim_coord(DimCoord(np.linspace(120, 180, 2), 'longitude',
+                                    units='degrees'), 2)
+        time_origin = "hours since 1970-01-01 00:00:00"
+        calendar = "gregorian"
+        tunit = Unit(time_origin, calendar)
+        cube.add_aux_coord(AuxCoord([402192.5, 402193.5],
+                                    "time", units=tunit), 0)
+        dummy_scalar_coord = iris.coords.AuxCoord(1,
+                                                  long_name='scalar_coord',
+                                                  units='no_unit')
+        cube.add_aux_coord(dummy_scalar_coord)
+        self.cube = cube
+
+    def test_basic(self):
+        """ Test that the plugin returns an array of weights """
+        coord = "time"
+        plugin = LinearWeights()
+        result = plugin.process(self.cube, coord)
+        self.assertIsInstance(result, np.ndarray)
+
+    def test_array_sum_equals_one(self):
+        """ Test that the resulting weights add up to one """
+        coord = "time"
+        plugin = LinearWeights()
+        result = plugin.process(self.cube, coord)
+        self.assertAlmostEquals(result.sum(), 1.0)
+
+    def test_fails_coord_not_in_cube(self):
+        """Test it raises a Value Error if coord not in the cube."""
+        coord = "notset"
+        plugin = LinearWeights()
+        msg = ('The coord for this plugin must be '
+               'an existing coordinate in the input cube')
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(self.cube, coord)
+
+    def test_fails_input_not_a_cube(self):
+        """Test it raises a Value Error if not supplied with a cube."""
+        coord = "time"
+        plugin = LinearWeights()
+        notacube = 0.0
+        msg = ('The first argument must be an instance of '
+               'iris.cube.Cube')
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(notacube, coord)
+
+    def test_fails_y0val_lessthan_zero(self):
+        """ Test it raises a Value Error if y0val less than zero """
+        coord = "time"
+        plugin = LinearWeights(y0val=-10.0)
+        msg = ('y0val must be a float > 0.0')
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(self.cube, coord)
+
+    def test_fails_ynval_and_slope_set(self):
+        """ Test it raises a Value Error if slope and ynval set """
+        coord = "time"
+        plugin = LinearWeights(y0val=10.0, slope=-5.0, ynval=5.0)
+        msg = ('Relative end point weight or slope must be set'
+               ' but not both.')
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(self.cube, coord)
+
+    def test_fails_weights_negative(self):
+        """ Test it raises a Value Error if weights become negative """
+        coord = "realization"
+        plugin = LinearWeights(y0val=10.0, slope=-5.0)
+        cubenew = add_realizations(self.cube, 6)
+        msg = 'Weights must be positive, at least one value < 0.0'
+        with self.assertRaisesRegexp(ValueError, msg):
+            plugin.process(cubenew, coord)
+
+    def test_works_scalar_coord(self):
+        """Test it works if scalar coordinate."""
+        coord = 'scalar_coord'
+        plugin = LinearWeights()
+        result = plugin.process(self.cube, coord)
+        self.assertArrayAlmostEqual(result, np.array([1.0]))
+
+    def test_works_defaults_used(self):
+        """Test it works with the default values."""
+        coord = "time"
+        plugin = LinearWeights()
+        result = plugin.process(self.cube, coord)
+        expected_result = np.array([0.90909091, 0.09090909])
+        self.assertArrayAlmostEqual(result, expected_result)
+
+    def test_works_y0val_and_slope_set(self):
+        """Test it works if y0val and slope are set."""
+        coord = "time"
+        plugin = LinearWeights(y0val=10.0, slope=-5.0)
+        result = plugin.process(self.cube, coord)
+        expected_result = np.array([0.66666667, 0.33333333])
+        self.assertArrayAlmostEqual(result, expected_result)
+
+    def test_works_y0val_and_ynval_set(self):
+        """Test it works if y0val and ynval are set."""
+        coord = "time"
+        plugin = LinearWeights(y0val=10.0, ynval=5.0)
+        result = plugin.process(self.cube, coord)
+        expected_result = np.array([0.66666667, 0.33333333])
+        self.assertArrayAlmostEqual(result, expected_result)
+
+    def test_works_with_larger_num(self):
+        """Test it works with larger num_of_vals."""
+        coord = "realization"
+        plugin = LinearWeights(y0val=10.0, ynval=5.0)
+        cubenew = add_realizations(self.cube, 6)
+        result = plugin.process(cubenew, coord)
+        expected_result = np.array([0.22222222, 0.2,
+                                    0.17777778, 0.15555556,
+                                    0.13333333, 0.11111111])
+        self.assertArrayAlmostEqual(result, expected_result)
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py b/lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py
new file mode 100644
index 0000000000..59c36ba9ab
--- /dev/null
+++ b/lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Unit tests for the weights.ChooseDefaultWeightsNonLinear plugin."""
+
+
+import unittest
+
+from cf_units import Unit
+from iris.coords import AuxCoord, DimCoord
+from iris.cube import Cube
+from iris.tests import IrisTest
+import iris
+import numpy as np
+
+from improver.weights import ChooseDefaultWeightsNonLinear as NonLinearWeights
+
+
+def add_realizations(cube, num):
+    """ Create num realizations of input cube
+        Args:
+            cube = iris.cube.Cube - input cube
+            num = integer - Number of realizations
+        Returns:
+            cubeout = iris.cube.Cube - copy of cube with num realizations added
+    """
+    cubelist = iris.cube.CubeList()
+    for i in range(0, num):
+        newcube = cube.copy()
+        new_ensemble_coord = iris.coords.AuxCoord(i,
+                                                  standard_name='realization')
+        newcube.add_aux_coord(new_ensemble_coord)
+        cubelist.append(newcube)
+    cubeout = cubelist.merge_cube()
+    return cubeout
+
+
+class TestChooseDefaultWeightsNonLinear(IrisTest):
+    """ Test the Default non-Linear Weights plugin """
+
+    def setUp(self):
+        data = np.zeros((2, 2, 2))
+        data[0][:][:] = 0.0
+        data[1][:][:] = 1.0
+        cube = Cube(data, standard_name="precipitation_amount",
+                    units="kg m^-2 s^-1")
+        cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, 2), 'latitude',
+                                    units='degrees'), 1)
+        cube.add_dim_coord(DimCoord(np.linspace(120, 180, 2), 'longitude',
+                                    units='degrees'), 2)
+        time_origin = "hours since 1970-01-01 00:00:00"
+        calendar = "gregorian"
+        tunit = Unit(time_origin, calendar)
+        cube.add_aux_coord(AuxCoord([402192.5, 402193.5],
+                                    "time", units=tunit), 0)
+        dummy_scalar_coord = iris.coords.AuxCoord(1,
+                                                  long_name='scalar_coord',
+                                                  units='no_unit')
+        cube.add_aux_coord(dummy_scalar_coord)
+        self.cube = cube
+
+    def test_basic(self):
+        """ Test that the plugin returns an array of weights """
+        coord = "time"
+        plugin = NonLinearWeights()
+        result = plugin.process(self.cube, coord)
+        self.assertIsInstance(result, np.ndarray)
+
+    def test_array_sum_equals_one(self):
+        """ Test that the resulting weights add up to one """
+        coord = "time"
+        plugin = NonLinearWeights()
+ result = plugin.process(self.cube, coord) + self.assertAlmostEquals(result.sum(), 1.0) + + def test_fails_coord_not_in_cube(self): + """Test it raises a Value Error if coord not in the cube.""" + coord = "notset" + plugin = NonLinearWeights() + msg = ('The coord for this plugin must be ' + 'an existing coordinate in the input cube') + with self.assertRaisesRegexp(ValueError, msg): + plugin.process(self.cube, coord) + + def test_fails_input_not_a_cube(self): + """Test it raises a Value Error if not supplied with a cube.""" + coord = "time" + plugin = NonLinearWeights() + notacube = 0.0 + msg = ('The first argument must be an instance of ' + 'iris.cube.Cube') + with self.assertRaisesRegexp(ValueError, msg): + plugin.process(notacube, coord) + + def test_fails_if_cval_not_valid(self): + """Test it raises a Value Error if cval is not in range, + cval must be greater than 0.0 and less + than or equal to 1.0 + """ + coord = "time" + plugin = NonLinearWeights(cval=-1.0) + msg = ('cval must be greater than 0.0 and less ' + 'than or equal to 1.0 ') + with self.assertRaisesRegexp(ValueError, msg): + plugin.process(self.cube, coord) + plugin2 = NonLinearWeights(cval=1.1) + with self.assertRaisesRegexp(ValueError, msg): + plugin2.process(self.cube, coord) + + def test_works_if_scalar_coord(self): + """Test it works if scalar coordinate.""" + coord = "scalar_coord" + plugin = NonLinearWeights() + result = plugin.process(self.cube, coord) + self.assertArrayAlmostEqual(result, np.array([1.0])) + + def test_works_with_default_cval(self): + """Test it works with default cval.""" + coord = "time" + plugin = NonLinearWeights() + result = plugin.process(self.cube, coord) + expected_result = np.array([0.54054054, 0.45945946]) + self.assertArrayAlmostEqual(result, expected_result) + + def test_works_with_cval_equal_one(self): + """Test it works with cval = 1.0, i.e. equal weights.""" + coord = "time" + plugin = NonLinearWeights(cval=1.0) + result = plugin.process(self.cube, coord) + expected_result = np.array([0.5, 0.5]) + self.assertArrayAlmostEqual(result, expected_result) + + def test_works_with_larger_num(self): + """Test it works with larger num_of_vals""" + coord = "realization" + plugin = NonLinearWeights(cval=0.5) + cubenew = add_realizations(self.cube, 6) + result = plugin.process(cubenew, coord) + expected_result = np.array([0.50793651, 0.25396825, + 0.12698413, 0.06349206, + 0.03174603, 0.01587302]) + self.assertArrayAlmostEqual(result, expected_result) + +if __name__ == '__main__': + unittest.main() diff --git a/lib/improver/weights.py b/lib/improver/weights.py index 6d8b3cb22f..d1a51298be 100644 --- a/lib/improver/weights.py +++ b/lib/improver/weights.py @@ -44,11 +44,15 @@ def normalise_weights(weights): Returns: normalised_weights : array of weights where sum = 1.0. 
- """ + if weights.min() < 0.0: + msg = 'Weights must be positive, at least one value < 0.0' + raise ValueError(msg) + sumval = weights.sum() if sumval == 0: - raise ValueError('Sum of weights must be > 0.0') + msg = 'Sum of weights must be > 0.0' + raise ValueError(msg) normalised_weights = weights / sumval return normalised_weights @@ -73,10 +77,15 @@ def nonlinear_weights(num_of_weights, cval): """ if not isinstance(num_of_weights, int) or num_of_weights <= 0: - raise ValueError('Number of weights must be integer > 0') + msg = ('Number of weights must be integer > 0, ' + 'num = {0:s}'.format(str(num_of_weights))) + raise ValueError(msg) if cval <= 0.0 or cval > 1.0: - raise ValueError('cval must be greater than 0.0 and less ' + - 'than or equal to 1.0') + msg = ('cval must be greater than 0.0 and less ' + 'than or equal to 1.0 ' + 'cval = {0:s}'.format(str(cval))) + raise ValueError(msg) + weights_list = [] for tval_minus1 in range(0, num_of_weights): weights_list.append(cval**(tval_minus1)) @@ -91,8 +100,9 @@ def linear_weights(num_of_weights, y0val=1.0, slope=0.0, """Create linear weights Args: - num_of_weights : Positive Integer - Number of weights to create. - y0val = float: relative value of starting point. Default = 1.0 + num_of_weights : Positive Integer: Number of weights to create. + y0val = positive float: + relative value of starting point. Default = 1.0 AND EITHER: slope = float: slope of the line. Default = 0.0 (equal weights) @@ -105,13 +115,25 @@ def linear_weights(num_of_weights, y0val=1.0, slope=0.0, """ if not isinstance(num_of_weights, int) or num_of_weights <= 0: - raise ValueError('Number of weights must be interger > 0') + msg = ('Number of weights must be integer > 0 ' + 'num = {0:s}'.format(str(num_of_weights))) + raise ValueError(msg) + # Special case num_of_weighs == 1 i.e. Scalar coordinate. + if num_of_weights == 1: + weights = np.array([1.0]) + return weights + if not isinstance(y0val, float) or y0val <= 0.0: + msg = ('y0val must be a float > 0.0, ' + 'y0val = {0:s}'.format(str(y0val))) + raise ValueError(msg) if ynval is not None: if slope != 0.0: - raise ValueError('Relative end weight or slope must be set' + - ' but not both.') + msg = ('Relative end point weight or slope must be set' + ' but not both.') + raise ValueError(msg) else: slope = (ynval - y0val)/(num_of_weights - 1.0) + weights_list = [] for tval in range(0, num_of_weights): weights_list.append(slope*tval + y0val) @@ -126,7 +148,7 @@ class ChooseDefaultWeightsLinear(object): def __init__(self, y0val=None, slope=0.0, ynval=None): """Set up for calculating default weights using linear function - y0val = None or flaot: relative value of starting point. + y0val = None or positive float: relative value of starting point. slope = float: slope of the line. Default = 0.0 (equal weights) ynval = float or None: relative weights of last point. 
Default value is None @@ -136,6 +158,8 @@ def __init__(self, y0val=None, slope=0.0, ynval=None): If y0val value is not set or set to None then the code assumes that the ultimate default values of y0val = 20.0 and ynval = 2.0 are required + + equal weights when slope = 0.0 or y0val = ynval """ self.slope = slope self.ynval = ynval @@ -157,12 +181,16 @@ def process(self, cube, coord): weights : array of weights, sum of all weights = 1.0 """ if not isinstance(cube, iris.cube.Cube): - raise ValueError('The first argument must be an instance of ' + - 'iris.cube.Cube but is' + - ' {0:s}'.format(type(cube))) - if not cube.coord(coord): - raise ValueError('The coord for this plugin must be ' + - 'an existing coordinate in the input cube') + msg = ('The first argument must be an instance of ' + 'iris.cube.Cube but is' + ' {0:s}'.format(type(cube))) + raise ValueError(msg) + + if not cube.coords(coord): + msg = ('The coord for this plugin must be ' + 'an existing coordinate in the input cube') + raise ValueError(msg) + num_of_weights = len(cube.coord(coord).points) weights = linear_weights(num_of_weights, y0val=self.y0val, @@ -187,6 +215,7 @@ def __init__(self, cval=0.85): """Set up for calculating default weights using lnon-inear function cval = float: value greater than 0, less than equal 1.0 default = 0.85 + equal weights when cval = 1.0 """ self.cval = cval @@ -202,12 +231,14 @@ def process(self, cube, coord): weights : array of weights, sum of all weights = 1.0 """ if not isinstance(cube, iris.cube.Cube): - raise ValueError('The first argument must be an instance of ' + - 'iris.cube.Cube but is' + - ' {0:s}'.format(type(cube))) - if not cube.coord(coord): - raise ValueError('The coord for this plugin must be ' + - 'an existing coordinate in the input cube') + msg = ('The first argument must be an instance of ' + 'iris.cube.Cube but is' + ' {0:s}'.format(type(cube))) + raise ValueError(msg) + if not cube.coords(coord): + msg = ('The coord for this plugin must be ' + 'an existing coordinate in the input cube') + raise ValueError(msg) num_of_weights = len(cube.coord(coord).points) weights = nonlinear_weights(num_of_weights, cval=self.cval) From 907c0ce5db8f77963e79149cb22c9148f252c060 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 11 May 2017 16:20:02 +0100 Subject: [PATCH 0060/1367] First attempt at configuring radii to vary by lead time. This includes unit tests and plugins changes. Changes to the command line interface are dependent upon #5. 
--- bin/improver-nbhood | 23 +- lib/improver/nbhood.py | 180 ++++++++++--- .../helper_functions_ensemble_calibration.py | 8 +- ...est_nbhood_basicneighbourhoodprocessing.py | 252 +++++++++++++++--- 4 files changed, 388 insertions(+), 75 deletions(-) diff --git a/bin/improver-nbhood b/bin/improver-nbhood index 691b4c7aa4..2bb7ba1f34 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -41,11 +41,17 @@ from improver.nbhood import BasicNeighbourhoodProcessing def main(): """Load in arguments and get going.""" parser = argparse.ArgumentParser( - description='Apply basic weighted circle smoothing via ' + - 'the BasicNeighbourhoodProcessing plugin ' + - 'to a file with one cube.') - parser.add_argument('--radius-in-km', metavar='RADIUS', type=float, - help='The kernel radius for neighbourhood processing') + description='Apply basic weighted circle smoothing via ' + + 'the BasicNeighbourhoodProcessing plugin ' + + 'to a file with one cube.') + group = parser.add_mutually_exclusive_group() + group.add_argument('--radius-in-km', metavar='RADIUS', type=float, + help='The kernel radius for neighbourhood processing') + group.add_argument('--radius-in-km-by-lead-time', + metavar='RADIUS_BY_LEAD_TIME', nargs=2, + help='The kernel radii for neighbourhood processing' + 'and the associated lead times at which the radii are' + 'valid.') parser.add_argument('input_filepath', metavar='INPUT_FILE', help='A path to an input NetCDF file to be processed') parser.add_argument('output_filepath', metavar='OUTPUT_FILE', @@ -53,6 +59,13 @@ def main(): args = parser.parse_args() cube = iris.load_cube(args.input_filepath) result = BasicNeighbourhoodProcessing(args.radius_in_km).process(cube) + cube = iris.load_cube(args.input_filepath) + radius_in_km = args.radius_in_km_by_lead_time[0] + leadtimes = args.radius_in_km_by_lead_time[1] + result = ( + BasicNeighbourhoodProcessing( + radius_in_km=args.radius_in_km, + lead_times=lead_times).process(cube)) iris.save(result, args.output_filepath, unlimited_dimensions=[]) diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py index 5475fac41b..17bf0f7ef2 100644 --- a/lib/improver/nbhood.py +++ b/lib/improver/nbhood.py @@ -30,11 +30,15 @@ # POSSIBILITY OF SUCH DAMAGE. """Module containing neighbourhood processing utilities.""" - -import iris import numpy as np import scipy.ndimage.filters +import iris +from iris.exceptions import CoordinateNotFoundError + +from improver.ensemble_calibration.ensemble_calibration_utilities import ( + concatenate_cubes) + class BasicNeighbourhoodProcessing(object): """ @@ -55,7 +59,7 @@ class BasicNeighbourhoodProcessing(object): # Max extent of kernel in grid cells. MAX_KERNEL_CELL_RADIUS = 500 - def __init__(self, radius_in_km, unweighted_mode=False): + def __init__(self, radii_in_km, lead_times=None, unweighted_mode=False): """ Create a neighbourhood processing plugin that applies a smoothing kernel to points in a cube. @@ -63,8 +67,8 @@ def __init__(self, radius_in_km, unweighted_mode=False): Parameters ---------- - radius_in_km : float - The radius in kilometres of the neighbourhood kernel to + radii_in_km : float + The radii in kilometres of the neighbourhood kernel to apply. Rounded up to convert into integer number of grid points east and north, based on the characteristic spacing at the zero indices of the cube projection-x/y coords. @@ -75,17 +79,68 @@ def __init__(self, radius_in_km, unweighted_mode=False): weighting decreasing with radius. 
""" - self.radius_in_km = float(radius_in_km) + if isinstance(radii_in_km, list): + self.radii_in_km = map(float, radii_in_km) + else: + self.radii_in_km = float(radii_in_km) + self.lead_times = lead_times self.unweighted_mode = bool(unweighted_mode) def __str__(self): - result = ('') return result.format( - self.radius_in_km, self.unweighted_mode) + self.radii_in_km, self.unweighted_mode) + + def find_required_lead_times(self, cube): + """ + Determine the lead times within a cube, either by reading the + forecast_period coordinate, or by calculating the difference between + the time and the forecast_reference_time. + + Parameters + ---------- + cube : Iris.cube.Cube + Cube from which the lead times will be determined. + + Returns + ------- + cube : Iris.cube.Cube + Cube after applying a kernel, so that the resulting field is + smoothed. + + """ + if cube.coords("forecast_period"): + required_lead_times = cube.coord("forecast_period").points + else: + if cube.coords("time") and cube.coords("forecast_reference_time"): + required_lead_times = ( + cube.coord("time").points - + cube.coord("forecast_reference_time").points) + else: + msg = ("The forecast period coordinate is not available " + "within {}." + "The time coordinate and forecast_reference_time " + "coordinate were also not available for calculating " + "the forecast_period.".format(cube)) + raise CoordinateNotFoundError(msg) + return required_lead_times + + def get_grid_x_y_kernel_ranges(self, cube, radii_in_km): + """ + Return the number of grid cells in the x and y direction + to be used to create the kernel. - def get_grid_x_y_kernel_ranges(self, cube): - """Return grid cell numbers east and north for the kernel.""" + Parameters + ---------- + cube : Iris.cube.Cube + Cube containing the x and y coordinates, which will be used for + calculating the number of grid cells in the x and y direction, + which equates to the size of the desired radii. + radii_in_km : Float + Radius in kilometres for use in specifying the number of + grid cells used to create a kernel. + """ try: x_coord = cube.coord("projection_x_coordinate").copy() y_coord = cube.coord("projection_y_coordinate").copy() @@ -95,67 +150,118 @@ def get_grid_x_y_kernel_ranges(self, cube): y_coord.convert_units("metres") d_north_metres = y_coord.points[1] - y_coord.points[0] d_east_metres = x_coord.points[1] - x_coord.points[0] - grid_cells_y = int(self.radius_in_km * 1000 / abs(d_north_metres)) - grid_cells_x = int(self.radius_in_km * 1000 / abs(d_east_metres)) + grid_cells_y = int(radii_in_km * 1000 / abs(d_north_metres)) + grid_cells_x = int(radii_in_km * 1000 / abs(d_east_metres)) if grid_cells_x == 0 or grid_cells_y == 0: raise ValueError( ("Neighbourhood processing radius of " + - "{0} km ".format(self.radius_in_km) + + "{0} km ".format(self.radii_in_km[0]) + "gives zero cell extent") ) if (grid_cells_x > self.MAX_KERNEL_CELL_RADIUS or grid_cells_y > self.MAX_KERNEL_CELL_RADIUS): raise ValueError( ("Neighbourhood processing radius of " + - "{0} km ".format(self.radius_in_km) + + "{0} km ".format(self.radii_in_km[0]) + "exceeds maximum grid cell extent") ) return grid_cells_x, grid_cells_y - def process(self, cube): + def apply_kernel_for_smoothing(self, cube, ranges): """ - Set the specified name and units metadata to the cube from the upstream - plugin. + Return the number of grid cells in the x and y direction + to be used to create the kernel. - Returns - ------- - Cube - The cube from the upstream plugin with name and units metadata - applied. 
+ Parameters + ---------- + cube : Iris.cube.Cube + Cube containing the x and y coordinates, which will be used for + calculating the number of grid cells in the x and y direction, + which equates to the size of the desired radii. + ranges : Tuple + Number of grid cells in the x and y direction used to create + the kernel. """ - try: - realiz_coord = cube.coord('realization') - except iris.exceptions.CoordinateNotFoundError: - pass - else: - if len(realiz_coord.points) > 1: - raise ValueError("Does not operate across realizations.") - if np.isnan(cube.data).any(): - raise ValueError("Error: NaN detected in input cube data") data = cube.data - ranges = self.get_grid_x_y_kernel_ranges(cube) - fullranges = np.zeros([np.rank(data)]) + fullranges = np.zeros([np.ndim(data)]) axes = [] for coord_name in ['projection_x_coordinate', 'projection_y_coordinate']: axes.append(cube.coord_dims(coord_name)[0]) for axis_index, axis in enumerate(axes): fullranges[axis] = ranges[axis_index] - kernel = np.ones([1 + x * 2 for x in fullranges]) - n = np.ogrid[tuple([slice(-x, x+1) for x in ranges])] + # Define the size of the kernel based on the number of grid cells + # contained within the desired radius. + kernel = np.ones([int(1 + x * 2) for x in fullranges]) + # Create an open multi-dimensional meshgrid. + meshgrid = np.ogrid[tuple([slice(-x, x+1) for x in ranges])] if self.unweighted_mode: mask = np.reshape( - np.sum([x ** 2 for x in n]) > np.cumprod(ranges)[-1], + np.sum([x ** 2 for x in meshgrid]) > np.cumprod(ranges)[-1], np.shape(kernel) ) else: + # Create a kernel, such that the central grid point has the + # highest weighting, with the weighting decreasing with distance + # away from the central grid point. kernel[:] = ( - (np.cumprod(ranges)[-1] - np.sum([x**2. for x in n])) / + (np.cumprod(ranges)[-1] - np.sum([x**2. for x in meshgrid])) / np.cumprod(ranges)[-1] ) mask = kernel < 0. kernel[mask] = 0. + # Smooth the data by applying the kernel. cube.data = scipy.ndimage.filters.correlate( data, kernel, mode='nearest') / np.sum(kernel) return cube + + def process(self, cube): + """ + Calculate a kernel to apply, in order to smooth the input cube. + + Parameters + ---------- + cube : Iris.cube.Cube + Cube to apply a kernel to, in order to generate a smoother field. + + Returns + ------- + cube : Iris.cube.Cube + Cube after applying a kernel, so that the resulting field is + smoothed. + + """ + try: + realiz_coord = cube.coord('realization') + except iris.exceptions.CoordinateNotFoundError: + pass + else: + if len(realiz_coord.points) > 1: + raise ValueError("Does not operate across realizations.") + if np.isnan(cube.data).any(): + raise ValueError("Error: NaN detected in input cube data") + + if self.lead_times is None: + radii_in_km = self.radii_in_km + ranges = self.get_grid_x_y_kernel_ranges(cube, radii_in_km) + cube = self.apply_kernel_for_smoothing(cube, ranges) + else: + required_lead_times = self.find_required_lead_times(cube) + # Interpolate to find the radius at each required lead time. + required_radii_in_km = ( + np.interp( + required_lead_times, self.lead_times, self.radii_in_km)) + cubes = iris.cube.CubeList([]) + # Find the number of grid cells required for creating the kernel, + # and then apply the kernel to smooth the field. 
+ for cube_slice, radii_in_km in ( + zip(cube.slices_over("time"), required_radii_in_km)): + ranges = self.get_grid_x_y_kernel_ranges( + cube_slice, radii_in_km) + cube_slice = ( + self.apply_kernel_for_smoothing(cube_slice, ranges)) + cube_slice = iris.util.new_axis(cube_slice, "time") + cubes.append(cube_slice) + cube = concatenate_cubes(cubes) + return cube diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 45a50d0779..dadff94fb7 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -87,15 +87,19 @@ def _add_forecast_reference_time_and_forecast_period( to the input cube. """ cube.coord("time").points = time_point + coord_position = cube.coord_dims("time") + if not isinstance(fp_point, list): + fp_point = [fp_point] fp_points = fp_point - frt_points = cube.coord("time").points[0] - fp_points + frt_points = cube.coord("time").points[0] - fp_points[0] time_origin = "hours since 1970-01-01 00:00:00" calendar = "gregorian" tunit = Unit(time_origin, calendar) cube.add_aux_coord( DimCoord([frt_points], "forecast_reference_time", units=tunit)) cube.add_aux_coord( - DimCoord([fp_points], "forecast_period", units="hours")) + DimCoord(fp_points, "forecast_period", units="hours"), + data_dims=coord_position) return cube diff --git a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py index 923b4568c1..c1a6481535 100644 --- a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py +++ b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py @@ -34,15 +34,19 @@ import unittest from cf_units import Unit +import iris from iris.coords import AuxCoord, DimCoord from iris.coord_systems import OSGB from iris.cube import Cube +from iris.exceptions import CoordinateNotFoundError from iris.tests import IrisTest import numpy as np from improver.grids.osgb import OSGBGRID from improver.nbhood import BasicNeighbourhoodProcessing as NBHood +from improver.tests.helper_functions_ensemble_calibration import ( + _add_forecast_reference_time_and_forecast_period) SINGLE_POINT_RANGE_3_CENTROID = np.array([ @@ -163,7 +167,59 @@ def set_up_cube_lat_long(zero_point_indices=((0, 7, 7),), num_time_points=1, return cube -class Test_operation_radius_to_grid_cells(IrisTest): +class Test_find_required_lead_times(IrisTest): + + """Test determining of the lead times present within the input cube.""" + + RADIUS_IN_KM = 6.1 + + def test_basic(self): + """Test that a list is returned.""" + cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) + plugin = NBHood(self.RADIUS_IN_KM) + result = plugin.find_required_lead_times(cube) + self.assertIsInstance(result, np.ndarray) + + def test_check_coordinate(self): + """ + Test that the data within the list is as expected, when + the input cube has a forecast_period coordinate. + """ + cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) + expected_result = cube.coord("forecast_period").points + plugin = NBHood(self.RADIUS_IN_KM) + result = plugin.find_required_lead_times(cube) + self.assertArrayAlmostEqual(result, expected_result) + + def test_check_coordinate_without_forecast_period(self): + """ + Test that the data within the list is as expected, when + the input cube has a time coordinate and a forecast_reference_time + coordinate. 
+ """ + cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) + cube.remove_coord("forecast_period") + expected_result = ( + cube.coord("time").points - + cube.coord("forecast_reference_time").points) + plugin = NBHood(self.RADIUS_IN_KM) + result = plugin.find_required_lead_times(cube) + self.assertArrayAlmostEqual(result, expected_result) + + def test_exception_raised(self): + """ + Test that a CoordinateNotFoundError exception is raised if the + forecast_period, or the time and forecast_reference_time, + are not present. + """ + cube = set_up_cube() + plugin = NBHood(self.RADIUS_IN_KM) + msg = "The forecast period coordinate is not available" + with self.assertRaisesRegexp(CoordinateNotFoundError, msg): + plugin.find_required_lead_times(cube) + + +class Test_get_grid_x_y_kernel_ranges(IrisTest): """Test conversion of kernel radius in kilometres to grid cells.""" @@ -186,7 +242,7 @@ def test_basic_radius_to_grid_cells_km_grid(self): self.assertEqual(result, (3, 3)) -class Test_operation_neighbourhooding(IrisTest): +class Test_apply_kernel_for_smoothing(IrisTest): """Test neighbourhood processing plugin on the OS National Grid.""" @@ -196,7 +252,7 @@ def test_basic(self): """Test that the plugin returns an iris.cube.Cube.""" cube = set_up_cube() plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.process(cube) + result = plugin.apply_kernel_for_smoothing(cube) self.assertIsInstance(result, Cube) def test_single_point(self): @@ -205,7 +261,7 @@ def test_single_point(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_flat(self): @@ -221,7 +277,10 @@ def test_single_point_flat(self): for index, slice_ in enumerate(SINGLE_POINT_RANGE_2_CENTROID_FLAT): expected[0][5 + index][5:10] = slice_ radius_in_km = 4.2 # Equivalent to a range of 2. - result = NBHood(radius_in_km, unweighted_mode=True).process(cube) + result = ( + NBHood( + radius_in_km, + unweighted_mode=True).apply_kernel_for_smoothing(cube)) self.assertArrayAlmostEqual(result.data, expected) def test_multi_point_multitimes(self): @@ -235,24 +294,16 @@ def test_multi_point_multitimes(self): expected[0][8 + index][8:13] = slice_ for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[1][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) - def test_single_point_nan(self): - """Test behaviour for a single NaN grid cell.""" - cube = set_up_cube() - cube.data[0][6][7] = np.NAN - msg = "NaN detected in input cube data" - with self.assertRaisesRegexp(ValueError, msg): - NBHood(self.RADIUS_IN_KM).process(cube) - def test_single_point_lat_long(self): """Test behaviour for a single grid cell on lat long grid.""" cube = set_up_cube_lat_long() msg = "Invalid grid: projection_x/y coords required" expected = np.zeros_like(cube.data) with self.assertRaisesRegexp(ValueError, msg): - NBHood(self.RADIUS_IN_KM).process(cube) + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) def test_single_point_masked_to_null(self): """Test behaviour with a masked non-zero point. 
@@ -270,7 +321,7 @@ def test_single_point_masked_to_null(self): for time_index in range(len(expected)): for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[time_index][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_masked_other_point(self): @@ -287,7 +338,7 @@ def test_single_point_masked_other_point(self): for time_index in range(len(expected)): for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[time_index][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_negative(self): @@ -295,7 +346,7 @@ def test_single_point_range_negative(self): cube = set_up_cube() msg = "negative dimensions are not allowed" with self.assertRaisesRegexp(ValueError, msg): - NBHood(-1.0 * self.RADIUS_IN_KM).process(cube) + NBHood(-1.0 * self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) def test_single_point_range_0(self): """Test behaviour with a non-zero point with zero range.""" @@ -303,7 +354,7 @@ def test_single_point_range_0(self): msg = "radius of 0.005 km gives zero cell extent" with self.assertRaisesRegexp(ValueError, msg): expected = np.zeros_like(cube.data) - NBHood(0.005).process(cube) + NBHood(0.005).apply_kernel_for_smoothing(cube) def test_single_point_range_1(self): """Test behaviour with a non-zero point with unit range.""" @@ -311,7 +362,7 @@ def test_single_point_range_1(self): expected = np.ones_like(cube.data) expected[0][7][7] = 0.0 radius_in_km = 2.1 # Equivalent to a range of 1 grid cell. - result = NBHood(radius_in_km).process(cube) + result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_5(self): @@ -322,7 +373,7 @@ def test_single_point_range_5(self): for index, slice_ in enumerate(SINGLE_POINT_RANGE_5_CENTROID): expected[time_index][3 + index][3:12] = slice_ radius_in_km = 10.5 # Equivalent to a range of 5 grid cells. - result = NBHood(radius_in_km).process(cube) + result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_5_small_domain(self): @@ -339,7 +390,7 @@ def test_single_point_range_5_small_domain(self): [0.97944502, 0.97841727, 0.97944502, 0.98252826]] ]) radius_in_km = 10.5 # Equivalent to a range of 5 grid cells. 
- result = NBHood(radius_in_km).process(cube) + result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_lots(self): @@ -348,7 +399,7 @@ def test_single_point_range_lots(self): msg = "radius of 500000.0 km exceeds maximum grid cell extent" with self.assertRaisesRegexp(ValueError, msg): expected = np.zeros_like(cube.data) - NBHood(500000.0).process(cube) + NBHood(500000.0).apply_kernel_for_smoothing(cube) def test_point_pair(self): """Test behaviour for two nearby non-zero grid cells.""" @@ -364,7 +415,7 @@ def test_point_pair(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(expected_snippet): expected[0][5 + index][4:11] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_almost_edge(self): @@ -374,7 +425,7 @@ def test_single_point_almost_edge(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][5 + index][0:5] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_adjacent_edge(self): @@ -384,7 +435,7 @@ def test_single_point_adjacent_edge(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][5 + index][0:4] = slice_[1:] - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_on_edge(self): @@ -407,7 +458,7 @@ def test_single_point_on_edge(self): ]) for index, slice_ in enumerate(expected_centroid): expected[0][5 + index][0:3] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_almost_corner(self): @@ -417,7 +468,7 @@ def test_single_point_almost_corner(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][index][0:5] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_adjacent_corner(self): @@ -429,7 +480,7 @@ def test_single_point_adjacent_corner(self): if index == 0: continue expected[0][index - 1][0:4] = slice_[1:] - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_on_corner(self): @@ -450,9 +501,31 @@ def test_single_point_on_corner(self): ]) for index, slice_ in enumerate(expected_centroid): expected[0][index][0:3] = slice_ - result = NBHood(self.RADIUS_IN_KM).process(cube) + result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) self.assertArrayAlmostEqual(result.data, expected) + +class Test_process(IrisTest): + + """Tests for the process method of BasicNeighbourhoodProcessing.""" + + RADIUS_IN_KM = 6.3 # Gives 3 grid cells worth. 
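    # A note on the radius-to-cell comments in these tests: assuming the 2 km
    # spacing of the OSGB test grid (an inference from the other radius/cell
    # pairs in this file, e.g. 2.1 km -> 1 cell, 10.5 km -> 5 cells), the
    # kernel half-width follows from the truncation in
    # get_grid_x_y_kernel_ranges:
    #     int(6.3 * 1000 / 2000)  # -> 3 grid cells
    #     int(2.1 * 1000 / 2000)  # -> 1 grid cell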
+ + def test_basic(self): + """Test that the plugin returns an iris.cube.Cube.""" + cube = set_up_cube() + plugin = NBHood(self.RADIUS_IN_KM) + result = plugin.process(cube) + self.assertIsInstance(result, Cube) + + def test_single_point_nan(self): + """Test behaviour for a single NaN grid cell.""" + cube = set_up_cube() + cube.data[0][6][7] = np.NAN + msg = "NaN detected in input cube data" + with self.assertRaisesRegexp(ValueError, msg): + NBHood(self.RADIUS_IN_KM).process(cube) + def test_fail_multiple_realisations(self): """Test failing when the array has a realisation dimension.""" data = np.ones((14, 1, 16, 16)) @@ -489,7 +562,124 @@ def test_fail_multiple_realisations(self): standard_name="realization"), 0) msg = "Does not operate across realizations" with self.assertRaisesRegexp(ValueError, msg): - plugin = NBHood(self.RADIUS_IN_KM).process(cube) + plugin = ( + NBHood(self.RADIUS_IN_KM).process(cube)) + + def test_radii_varying_with_lead_time(self): + """ + Test that a cube is returned when the radius varies with lead time. + """ + cube = set_up_cube(num_time_points=3) + iris.util.promote_aux_coord_to_dim_coord(cube, "time") + time_points = cube.coord("time").points + fp_points = [2, 3, 4] + cube = _add_forecast_reference_time_and_forecast_period( + cube, time_point=time_points, fp_point=fp_points) + radii_in_km = [10, 20, 30] + lead_times = [2, 3, 4] + print "before nbhood cube = ", cube + plugin = NBHood(radii_in_km, lead_times) + result = plugin.process(cube) + self.assertIsInstance(result, Cube) + + def test_radii_varying_with_lead_time_check_data(self): + """ + Test that the expected data is produced when the radius + varies with lead time. + """ + cube = set_up_cube( + zero_point_indices=((0, 7, 7), (1, 7, 7,), (2, 7, 7)), + num_time_points=3) + print "cube.data = ", cube.data + expected = np.ones_like(cube.data) + expected[0, 6:9, 6:9] = ( + [0.91666667, 0.875, 0.91666667], + [0.875, 0.83333333, 0.875], + [0.91666667, 0.875, 0.91666667]) + + expected[1, 5:10, 5:10] = ( + [0.992, 0.968, 0.96, 0.968, 0.992], + [0.968, 0.944, 0.936, 0.944, 0.968], + [0.96, 0.936, 0.928, 0.936, 0.96], + [0.968, 0.944, 0.936, 0.944, 0.968], + [0.992, 0.968, 0.96, 0.968, 0.992]) + + expected[2, 4:11, 4:11] = ( + [1, 0.9925, 0.985, 0.9825, 0.985, 0.9925, 1], + [0.9925, 0.98, 0.9725, 0.97, 0.9725, 0.98, 0.9925], + [0.985, 0.9725, 0.965, 0.9625, 0.965, 0.9725, 0.985], + [0.9825, 0.97, 0.9625, 0.96, 0.9625, 0.97, 0.9825], + [0.985, 0.9725, 0.965, 0.9625, 0.965, 0.9725, 0.985], + [0.9925, 0.98, 0.9725, 0.97, 0.9725, 0.98, 0.9925], + [1, 0.9925, 0.985, 0.9825, 0.985, 0.9925, 1]) + + iris.util.promote_aux_coord_to_dim_coord(cube, "time") + time_points = cube.coord("time").points + fp_points = [2, 3, 4] + cube = _add_forecast_reference_time_and_forecast_period( + cube, time_point=time_points, fp_point=fp_points) + radii_in_km = [6, 8, 10] + lead_times = [2, 3, 4] + plugin = NBHood(radii_in_km, lead_times) + result = plugin.process(cube) + self.assertArrayAlmostEqual(result.data, expected) + + def test_radii_varying_with_lead_time_with_interpolation(self): + """ + Test that a cube is returned when the radius varies with lead time + and linearly interpolation is required, in order to . 
+ """ + cube = set_up_cube(num_time_points=3) + iris.util.promote_aux_coord_to_dim_coord(cube, "time") + time_points = cube.coord("time").points + fp_points = [2, 3, 4] + cube = _add_forecast_reference_time_and_forecast_period( + cube, time_point=time_points, fp_point=fp_points) + radii_in_km = [10, 30] + lead_times = [2, 4] + print "before nbhood cube = ", cube + plugin = NBHood(radii_in_km, lead_times) + result = plugin.process(cube) + self.assertIsInstance(result, Cube) + + def test_radii_varying_with_lead_time_with_interpolation_check_data(self): + """Test behaviour when the radius varies with lead time.""" + cube = set_up_cube( + zero_point_indices=((0, 7, 7), (1, 7, 7,), (2, 7, 7)), + num_time_points=3) + print "cube.data = ", cube.data + expected = np.ones_like(cube.data) + expected[0, 6:9, 6:9] = ( + [0.91666667, 0.875, 0.91666667], + [0.875, 0.83333333, 0.875], + [0.91666667, 0.875, 0.91666667]) + + expected[1, 5:10, 5:10] = ( + [0.992, 0.968, 0.96, 0.968, 0.992], + [0.968, 0.944, 0.936, 0.944, 0.968], + [0.96, 0.936, 0.928, 0.936, 0.96], + [0.968, 0.944, 0.936, 0.944, 0.968], + [0.992, 0.968, 0.96, 0.968, 0.992]) + + expected[2, 4:11, 4:11] = ( + [1, 0.9925, 0.985, 0.9825, 0.985, 0.9925, 1], + [0.9925, 0.98, 0.9725, 0.97, 0.9725, 0.98, 0.9925], + [0.985, 0.9725, 0.965, 0.9625, 0.965, 0.9725, 0.985], + [0.9825, 0.97, 0.9625, 0.96, 0.9625, 0.97, 0.9825], + [0.985, 0.9725, 0.965, 0.9625, 0.965, 0.9725, 0.985], + [0.9925, 0.98, 0.9725, 0.97, 0.9725, 0.98, 0.9925], + [1, 0.9925, 0.985, 0.9825, 0.985, 0.9925, 1]) + + iris.util.promote_aux_coord_to_dim_coord(cube, "time") + time_points = cube.coord("time").points + fp_points = [2, 3, 4] + cube = _add_forecast_reference_time_and_forecast_period( + cube, time_point=time_points, fp_point=fp_points) + radii_in_km = [6, 10] + lead_times = [2, 4] + plugin = NBHood(radii_in_km, lead_times) + result = plugin.process(cube) + self.assertArrayAlmostEqual(result.data, expected) if __name__ == '__main__': From dfb6b3266d566c402a4cdf2fa7f8bf797ab88994 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Fri, 12 May 2017 14:50:49 +0100 Subject: [PATCH 0061/1367] Edits to improve docstrings and to refactor some of the unit tests following some refactoring of the plugin within the previous commit. 
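The radii-varying tests above reduce to a linear interpolation of the
configured radii onto the cube's forecast periods, followed by conversion of
each radius into a grid-cell kernel extent. A minimal sketch of that
behaviour, using illustrative values only (the 2 km grid spacing and the
variable names here are assumptions, not taken from the plugin):

    import numpy as np

    # Radii defined at lead times T+2 and T+4 (hours); T+3 must be
    # interpolated.
    lead_times = [2, 4]
    radii_in_km = [10., 30.]
    required_lead_times = np.array([2, 3, 4])

    # Linear interpolation gives the radius at each required lead time.
    required_radii = np.interp(required_lead_times, lead_times, radii_in_km)
    print(required_radii)  # [ 10.  20.  30.]

    # Each radius is then reduced to a whole number of grid cells using the
    # characteristic grid spacing; this reproduces the "equivalent to a
    # range of N grid cells" comments in the tests, e.g. 10.5 km -> 5 cells.
    grid_spacing_km = 2.0
    ranges = (required_radii / grid_spacing_km).astype(int)
    print(ranges)  # [ 5 10 15]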
--- bin/improver-nbhood | 6 +- lib/improver/nbhood.py | 49 +++++-- ...est_nbhood_basicneighbourhoodprocessing.py | 133 ++++++++++++------ 3 files changed, 127 insertions(+), 61 deletions(-) diff --git a/bin/improver-nbhood b/bin/improver-nbhood index 2bb7ba1f34..274881ca5b 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -58,13 +58,11 @@ def main(): help='The output path for the processed NetCDF') args = parser.parse_args() cube = iris.load_cube(args.input_filepath) - result = BasicNeighbourhoodProcessing(args.radius_in_km).process(cube) - cube = iris.load_cube(args.input_filepath) radius_in_km = args.radius_in_km_by_lead_time[0] - leadtimes = args.radius_in_km_by_lead_time[1] + lead_times = args.radius_in_km_by_lead_time[1] result = ( BasicNeighbourhoodProcessing( - radius_in_km=args.radius_in_km, + args.radius_in_km, lead_times=lead_times).process(cube)) iris.save(result, args.output_filepath, unlimited_dimensions=[]) diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py index 17bf0f7ef2..a969ccece9 100644 --- a/lib/improver/nbhood.py +++ b/lib/improver/nbhood.py @@ -67,12 +67,14 @@ def __init__(self, radii_in_km, lead_times=None, unweighted_mode=False): Parameters ---------- - radii_in_km : float + radii_in_km : float or List (if defining lead times) The radii in kilometres of the neighbourhood kernel to apply. Rounded up to convert into integer number of grid points east and north, based on the characteristic spacing at the zero indices of the cube projection-x/y coords. - + lead_times : List + List of lead times or forecast periods, at which the radii + within radii_in_km are defined. unweighted_mode : boolean If True, use a circle with constant weighting. If False, use a circle for neighbourhood kernel with @@ -105,9 +107,9 @@ def find_required_lead_times(self, cube): Returns ------- - cube : Iris.cube.Cube - Cube after applying a kernel, so that the resulting field is - smoothed. + required_lead_times : Numpy array + Array containing the lead times, at which the radii need to be + calculated. """ if cube.coords("forecast_period"): @@ -140,11 +142,21 @@ def get_grid_x_y_kernel_ranges(self, cube, radii_in_km): radii_in_km : Float Radius in kilometres for use in specifying the number of grid cells used to create a kernel. + + Returns + ------- + grid_cells_x : Integer + Number of grid cells in the x direction to be used to create the + kernel. + grid_cells_y : Integer + Number of grid cells in the y direction to be used to create the + kernel. 
+ """ try: x_coord = cube.coord("projection_x_coordinate").copy() y_coord = cube.coord("projection_y_coordinate").copy() - except iris.exceptions.CoordinateNotFoundError: + except CoordinateNotFoundError: raise ValueError("Invalid grid: projection_x/y coords required") x_coord.convert_units("metres") y_coord.convert_units("metres") @@ -155,14 +167,20 @@ def get_grid_x_y_kernel_ranges(self, cube, radii_in_km): if grid_cells_x == 0 or grid_cells_y == 0: raise ValueError( ("Neighbourhood processing radius of " + - "{0} km ".format(self.radii_in_km[0]) + + "{0} km ".format(radii_in_km) + "gives zero cell extent") ) + elif grid_cells_x < 0 or grid_cells_y < 0: + raise ValueError( + ("Neighbourhood processing radius of " + + "{0} km ".format(radii_in_km) + + "gives a negative cell extent") + ) if (grid_cells_x > self.MAX_KERNEL_CELL_RADIUS or grid_cells_y > self.MAX_KERNEL_CELL_RADIUS): raise ValueError( ("Neighbourhood processing radius of " + - "{0} km ".format(self.radii_in_km[0]) + + "{0} km ".format(radii_in_km) + "exceeds maximum grid cell extent") ) return grid_cells_x, grid_cells_y @@ -182,13 +200,22 @@ def apply_kernel_for_smoothing(self, cube, ranges): Number of grid cells in the x and y direction used to create the kernel. + Returns + ------- + cube : Iris.cube.Cube + Cube containing the smoothed field after the kernel has been + applied. + """ data = cube.data fullranges = np.zeros([np.ndim(data)]) axes = [] - for coord_name in ['projection_x_coordinate', - 'projection_y_coordinate']: - axes.append(cube.coord_dims(coord_name)[0]) + try: + for coord_name in ['projection_x_coordinate', + 'projection_y_coordinate']: + axes.append(cube.coord_dims(coord_name)[0]) + except CoordinateNotFoundError: + raise ValueError("Invalid grid: projection_x/y coords required") for axis_index, axis in enumerate(axes): fullranges[axis] = ranges[axis_index] # Define the size of the kernel based on the number of grid cells diff --git a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py index c1a6481535..eae09e904f 100644 --- a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py +++ b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py @@ -229,7 +229,7 @@ def test_basic_radius_to_grid_cells(self): """Test the lat-long radius-to-grid-cell conversion.""" cube = set_up_cube() plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.get_grid_x_y_kernel_ranges(cube) + result = plugin.get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) self.assertEqual(result, (3, 3)) def test_basic_radius_to_grid_cells_km_grid(self): @@ -238,9 +238,48 @@ def test_basic_radius_to_grid_cells_km_grid(self): cube.coord("projection_x_coordinate").convert_units("kilometres") cube.coord("projection_y_coordinate").convert_units("kilometres") plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.get_grid_x_y_kernel_ranges(cube) + result = plugin.get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) self.assertEqual(result, (3, 3)) + def test_single_point_lat_long(self): + """Test behaviour for a single grid cell on lat long grid.""" + cube = set_up_cube_lat_long() + plugin = NBHood(self.RADIUS_IN_KM) + msg = "Invalid grid: projection_x/y coords required" + expected = np.zeros_like(cube.data) + ranges = (3, 3) + with self.assertRaisesRegexp(ValueError, msg): + plugin.get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) + + def test_single_point_range_negative(self): + """Test behaviour with a non-zero point with negative range.""" + cube = 
set_up_cube() + radius_in_km = -1.0 * self.RADIUS_IN_KM + plugin = NBHood(radius_in_km) + msg = "radius of -6.1 km gives a negative cell extent" + with self.assertRaisesRegexp(ValueError, msg): + plugin.get_grid_x_y_kernel_ranges(cube, radius_in_km) + + def test_single_point_range_0(self): + """Test behaviour with a non-zero point with zero range.""" + cube = set_up_cube() + radius_in_km = 0.005 + plugin = NBHood(radius_in_km) + msg = "radius of 0.005 km gives zero cell extent" + with self.assertRaisesRegexp(ValueError, msg): + expected = np.zeros_like(cube.data) + plugin.get_grid_x_y_kernel_ranges(cube, radius_in_km) + + def test_single_point_range_lots(self): + """Test behaviour with a non-zero point with unhandleable range.""" + cube = set_up_cube() + radius_in_km = 500000.0 + plugin = NBHood(radius_in_km) + msg = "radius of 500000.0 km exceeds maximum grid cell extent" + with self.assertRaisesRegexp(ValueError, msg): + expected = np.zeros_like(cube.data) + plugin.get_grid_x_y_kernel_ranges(cube, radius_in_km) + class Test_apply_kernel_for_smoothing(IrisTest): @@ -252,7 +291,8 @@ def test_basic(self): """Test that the plugin returns an iris.cube.Cube.""" cube = set_up_cube() plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = plugin.apply_kernel_for_smoothing(cube, ranges) self.assertIsInstance(result, Cube) def test_single_point(self): @@ -261,7 +301,9 @@ def test_single_point(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_flat(self): @@ -277,10 +319,12 @@ def test_single_point_flat(self): for index, slice_ in enumerate(SINGLE_POINT_RANGE_2_CENTROID_FLAT): expected[0][5 + index][5:10] = slice_ radius_in_km = 4.2 # Equivalent to a range of 2. + ranges = (2, 2) result = ( NBHood( radius_in_km, - unweighted_mode=True).apply_kernel_for_smoothing(cube)) + unweighted_mode=True).apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_multi_point_multitimes(self): @@ -294,7 +338,9 @@ def test_multi_point_multitimes(self): expected[0][8 + index][8:13] = slice_ for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[1][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_lat_long(self): @@ -302,8 +348,9 @@ def test_single_point_lat_long(self): cube = set_up_cube_lat_long() msg = "Invalid grid: projection_x/y coords required" expected = np.zeros_like(cube.data) + ranges = (3, 3) with self.assertRaisesRegexp(ValueError, msg): - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges) def test_single_point_masked_to_null(self): """Test behaviour with a masked non-zero point. 
@@ -321,7 +368,9 @@ def test_single_point_masked_to_null(self): for time_index in range(len(expected)): for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[time_index][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_masked_other_point(self): @@ -338,31 +387,19 @@ def test_single_point_masked_other_point(self): for time_index in range(len(expected)): for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[time_index][5 + index][5:10] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) - def test_single_point_range_negative(self): - """Test behaviour with a non-zero point with negative range.""" - cube = set_up_cube() - msg = "negative dimensions are not allowed" - with self.assertRaisesRegexp(ValueError, msg): - NBHood(-1.0 * self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) - - def test_single_point_range_0(self): - """Test behaviour with a non-zero point with zero range.""" - cube = set_up_cube() - msg = "radius of 0.005 km gives zero cell extent" - with self.assertRaisesRegexp(ValueError, msg): - expected = np.zeros_like(cube.data) - NBHood(0.005).apply_kernel_for_smoothing(cube) - def test_single_point_range_1(self): """Test behaviour with a non-zero point with unit range.""" cube = set_up_cube() expected = np.ones_like(cube.data) expected[0][7][7] = 0.0 radius_in_km = 2.1 # Equivalent to a range of 1 grid cell. - result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube) + ranges = (1, 1) + result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube, ranges) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_5(self): @@ -373,7 +410,8 @@ def test_single_point_range_5(self): for index, slice_ in enumerate(SINGLE_POINT_RANGE_5_CENTROID): expected[time_index][3 + index][3:12] = slice_ radius_in_km = 10.5 # Equivalent to a range of 5 grid cells. - result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube) + ranges = (5, 5) + result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube, ranges) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_5_small_domain(self): @@ -390,17 +428,10 @@ def test_single_point_range_5_small_domain(self): [0.97944502, 0.97841727, 0.97944502, 0.98252826]] ]) radius_in_km = 10.5 # Equivalent to a range of 5 grid cells. 
- result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube) + ranges = (5, 5) + result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube, ranges) self.assertArrayAlmostEqual(result.data, expected) - def test_single_point_range_lots(self): - """Test behaviour with a non-zero point with unhandleable range.""" - cube = set_up_cube() - msg = "radius of 500000.0 km exceeds maximum grid cell extent" - with self.assertRaisesRegexp(ValueError, msg): - expected = np.zeros_like(cube.data) - NBHood(500000.0).apply_kernel_for_smoothing(cube) - def test_point_pair(self): """Test behaviour for two nearby non-zero grid cells.""" cube = set_up_cube( @@ -415,7 +446,9 @@ def test_point_pair(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(expected_snippet): expected[0][5 + index][4:11] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_almost_edge(self): @@ -425,7 +458,9 @@ def test_single_point_almost_edge(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][5 + index][0:5] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_adjacent_edge(self): @@ -435,7 +470,9 @@ def test_single_point_adjacent_edge(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][5 + index][0:4] = slice_[1:] - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_on_edge(self): @@ -458,7 +495,9 @@ def test_single_point_on_edge(self): ]) for index, slice_ in enumerate(expected_centroid): expected[0][5 + index][0:3] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_almost_corner(self): @@ -468,7 +507,9 @@ def test_single_point_almost_corner(self): expected = np.ones_like(cube.data) for index, slice_ in enumerate(SINGLE_POINT_RANGE_3_CENTROID): expected[0][index][0:5] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_adjacent_corner(self): @@ -480,7 +521,9 @@ def test_single_point_adjacent_corner(self): if index == 0: continue expected[0][index - 1][0:4] = slice_[1:] - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_on_corner(self): @@ -501,7 +544,9 @@ def test_single_point_on_corner(self): ]) for index, slice_ in enumerate(expected_centroid): expected[0][index][0:3] = slice_ - result = NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube) + ranges = (3, 3) + result = ( + 
NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) @@ -577,7 +622,6 @@ def test_radii_varying_with_lead_time(self): cube, time_point=time_points, fp_point=fp_points) radii_in_km = [10, 20, 30] lead_times = [2, 3, 4] - print "before nbhood cube = ", cube plugin = NBHood(radii_in_km, lead_times) result = plugin.process(cube) self.assertIsInstance(result, Cube) @@ -590,7 +634,6 @@ def test_radii_varying_with_lead_time_check_data(self): cube = set_up_cube( zero_point_indices=((0, 7, 7), (1, 7, 7,), (2, 7, 7)), num_time_points=3) - print "cube.data = ", cube.data expected = np.ones_like(cube.data) expected[0, 6:9, 6:9] = ( [0.91666667, 0.875, 0.91666667], @@ -637,7 +680,6 @@ def test_radii_varying_with_lead_time_with_interpolation(self): cube, time_point=time_points, fp_point=fp_points) radii_in_km = [10, 30] lead_times = [2, 4] - print "before nbhood cube = ", cube plugin = NBHood(radii_in_km, lead_times) result = plugin.process(cube) self.assertIsInstance(result, Cube) @@ -647,7 +689,6 @@ def test_radii_varying_with_lead_time_with_interpolation_check_data(self): cube = set_up_cube( zero_point_indices=((0, 7, 7), (1, 7, 7,), (2, 7, 7)), num_time_points=3) - print "cube.data = ", cube.data expected = np.ones_like(cube.data) expected[0, 6:9, 6:9] = ( [0.91666667, 0.875, 0.91666667], From 98dacc5c8fe65c2b6d7e9e5858fa9af6b01b3906 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 15 May 2017 10:33:14 +0100 Subject: [PATCH 0062/1367] Edits to add unit test for mismatch between number of radii and number of lead times. Edit to improver-nbhood script. --- bin/improver-nbhood | 12 +++++++---- lib/improver/nbhood.py | 8 +++++++- ...est_nbhood_basicneighbourhoodprocessing.py | 20 +++++++++++++++++++ 3 files changed, 35 insertions(+), 5 deletions(-) diff --git a/bin/improver-nbhood b/bin/improver-nbhood index 274881ca5b..257a5e8842 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -47,7 +47,7 @@ def main(): group = parser.add_mutually_exclusive_group() group.add_argument('--radius-in-km', metavar='RADIUS', type=float, help='The kernel radius for neighbourhood processing') - group.add_argument('--radius-in-km-by-lead-time', + group.add_argument('--radii-in-km-by-lead-time', metavar='RADIUS_BY_LEAD_TIME', nargs=2, help='The kernel radii for neighbourhood processing' 'and the associated lead times at which the radii are' @@ -58,11 +58,15 @@ def main(): help='The output path for the processed NetCDF') args = parser.parse_args() cube = iris.load_cube(args.input_filepath) - radius_in_km = args.radius_in_km_by_lead_time[0] - lead_times = args.radius_in_km_by_lead_time[1] + if args.radius_in_km: + radius_or_radii_in_km = args.radius_in_km + lead_times = None + elif args.radii_in_km_by_lead_time: + radius_or_radii_in_km = args.radii_in_km_by_lead_time[0] + lead_times = args.radius_in_km_by_lead_time[1] result = ( BasicNeighbourhoodProcessing( - args.radius_in_km, + radius_or_radii_in_km, lead_times=lead_times).process(cube)) iris.save(result, args.output_filepath, unlimited_dimensions=[]) diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py index a969ccece9..4b090e5a25 100644 --- a/lib/improver/nbhood.py +++ b/lib/improver/nbhood.py @@ -72,7 +72,7 @@ def __init__(self, radii_in_km, lead_times=None, unweighted_mode=False): apply. Rounded up to convert into integer number of grid points east and north, based on the characteristic spacing at the zero indices of the cube projection-x/y coords. 
- lead_times : List + lead_times : None or List List of lead times or forecast periods, at which the radii within radii_in_km are defined. unweighted_mode : boolean @@ -86,6 +86,12 @@ def __init__(self, radii_in_km, lead_times=None, unweighted_mode=False): else: self.radii_in_km = float(radii_in_km) self.lead_times = lead_times + if self.lead_times is not None: + if len(radii_in_km) != len(lead_times): + msg = ("There is a mismatch in the number of radii " + "and the number of lead times. " + "Unable to continue due to mismatch.") + raise ValueError(msg) self.unweighted_mode = bool(unweighted_mode) def __str__(self): diff --git a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py index eae09e904f..37f24bcff7 100644 --- a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py +++ b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py @@ -167,6 +167,26 @@ def set_up_cube_lat_long(zero_point_indices=((0, 7, 7),), num_time_points=1, return cube +class Test__init__(IrisTest): + + def test_radii_varying_with_lead_time_mismatch(self): + """ + Test that the desired error message is raised, if there is a mismatch + between the number of radii and the number of lead times. + """ + cube = set_up_cube(num_time_points=3) + iris.util.promote_aux_coord_to_dim_coord(cube, "time") + time_points = cube.coord("time").points + fp_points = [2, 3, 4] + cube = _add_forecast_reference_time_and_forecast_period( + cube, time_point=time_points, fp_point=fp_points) + radii_in_km = [10, 20, 30] + lead_times = [2, 3] + msg = "There is a mismatch in the number of radii" + with self.assertRaisesRegexp(ValueError, msg): + plugin = NBHood(radii_in_km, lead_times) + + class Test_find_required_lead_times(IrisTest): """Test determining of the lead times present within the input cube.""" From 798955eef9e90433fe2f980d7e6209dd3a7f6d00 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 25 May 2017 10:14:09 +0100 Subject: [PATCH 0063/1367] Codacy corrections and conversion of methods to private methods. --- lib/improver/nbhood.py | 17 ++-- ...est_nbhood_basicneighbourhoodprocessing.py | 80 +++++++++++-------- 2 files changed, 55 insertions(+), 42 deletions(-) diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py index 4b090e5a25..52ba95b2b2 100644 --- a/lib/improver/nbhood.py +++ b/lib/improver/nbhood.py @@ -100,7 +100,8 @@ def __str__(self): return result.format( self.radii_in_km, self.unweighted_mode) - def find_required_lead_times(self, cube): + @staticmethod + def _find_required_lead_times(cube): """ Determine the lead times within a cube, either by reading the forecast_period coordinate, or by calculating the difference between @@ -134,7 +135,7 @@ def find_required_lead_times(self, cube): raise CoordinateNotFoundError(msg) return required_lead_times - def get_grid_x_y_kernel_ranges(self, cube, radii_in_km): + def _get_grid_x_y_kernel_ranges(self, cube, radii_in_km): """ Return the number of grid cells in the x and y direction to be used to create the kernel. @@ -191,7 +192,7 @@ def get_grid_x_y_kernel_ranges(self, cube, radii_in_km): ) return grid_cells_x, grid_cells_y - def apply_kernel_for_smoothing(self, cube, ranges): + def _apply_kernel_for_smoothing(self, cube, ranges): """ Return the number of grid cells in the x and y direction to be used to create the kernel. 
@@ -277,10 +278,10 @@ def process(self, cube): if self.lead_times is None: radii_in_km = self.radii_in_km - ranges = self.get_grid_x_y_kernel_ranges(cube, radii_in_km) - cube = self.apply_kernel_for_smoothing(cube, ranges) + ranges = self._get_grid_x_y_kernel_ranges(cube, radii_in_km) + cube = self._apply_kernel_for_smoothing(cube, ranges) else: - required_lead_times = self.find_required_lead_times(cube) + required_lead_times = self._find_required_lead_times(cube) # Interpolate to find the radius at each required lead time. required_radii_in_km = ( np.interp( @@ -290,10 +291,10 @@ def process(self, cube): # and then apply the kernel to smooth the field. for cube_slice, radii_in_km in ( zip(cube.slices_over("time"), required_radii_in_km)): - ranges = self.get_grid_x_y_kernel_ranges( + ranges = self._get_grid_x_y_kernel_ranges( cube_slice, radii_in_km) cube_slice = ( - self.apply_kernel_for_smoothing(cube_slice, ranges)) + self._apply_kernel_for_smoothing(cube_slice, ranges)) cube_slice = iris.util.new_axis(cube_slice, "time") cubes.append(cube_slice) cube = concatenate_cubes(cubes) diff --git a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py index 37f24bcff7..98bfec6c45 100644 --- a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py +++ b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py @@ -184,10 +184,10 @@ def test_radii_varying_with_lead_time_mismatch(self): lead_times = [2, 3] msg = "There is a mismatch in the number of radii" with self.assertRaisesRegexp(ValueError, msg): - plugin = NBHood(radii_in_km, lead_times) + NBHood(radii_in_km, lead_times) -class Test_find_required_lead_times(IrisTest): +class Test__find_required_lead_times(IrisTest): """Test determining of the lead times present within the input cube.""" @@ -197,7 +197,7 @@ def test_basic(self): """Test that a list is returned.""" cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.find_required_lead_times(cube) + result = plugin._find_required_lead_times(cube) self.assertIsInstance(result, np.ndarray) def test_check_coordinate(self): @@ -208,7 +208,7 @@ def test_check_coordinate(self): cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) expected_result = cube.coord("forecast_period").points plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.find_required_lead_times(cube) + result = plugin._find_required_lead_times(cube) self.assertArrayAlmostEqual(result, expected_result) def test_check_coordinate_without_forecast_period(self): @@ -223,7 +223,7 @@ def test_check_coordinate_without_forecast_period(self): cube.coord("time").points - cube.coord("forecast_reference_time").points) plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.find_required_lead_times(cube) + result = plugin._find_required_lead_times(cube) self.assertArrayAlmostEqual(result, expected_result) def test_exception_raised(self): @@ -236,10 +236,10 @@ def test_exception_raised(self): plugin = NBHood(self.RADIUS_IN_KM) msg = "The forecast period coordinate is not available" with self.assertRaisesRegexp(CoordinateNotFoundError, msg): - plugin.find_required_lead_times(cube) + plugin._find_required_lead_times(cube) -class Test_get_grid_x_y_kernel_ranges(IrisTest): +class Test__get_grid_x_y_kernel_ranges(IrisTest): """Test conversion of kernel radius in kilometres to grid cells.""" @@ -249,7 +249,7 @@ def test_basic_radius_to_grid_cells(self): """Test the lat-long 
radius-to-grid-cell conversion.""" cube = set_up_cube() plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) + result = plugin._get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) self.assertEqual(result, (3, 3)) def test_basic_radius_to_grid_cells_km_grid(self): @@ -258,7 +258,7 @@ def test_basic_radius_to_grid_cells_km_grid(self): cube.coord("projection_x_coordinate").convert_units("kilometres") cube.coord("projection_y_coordinate").convert_units("kilometres") plugin = NBHood(self.RADIUS_IN_KM) - result = plugin.get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) + result = plugin._get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) self.assertEqual(result, (3, 3)) def test_single_point_lat_long(self): @@ -267,9 +267,8 @@ def test_single_point_lat_long(self): plugin = NBHood(self.RADIUS_IN_KM) msg = "Invalid grid: projection_x/y coords required" expected = np.zeros_like(cube.data) - ranges = (3, 3) with self.assertRaisesRegexp(ValueError, msg): - plugin.get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) + plugin._get_grid_x_y_kernel_ranges(cube, self.RADIUS_IN_KM) def test_single_point_range_negative(self): """Test behaviour with a non-zero point with negative range.""" @@ -278,7 +277,7 @@ def test_single_point_range_negative(self): plugin = NBHood(radius_in_km) msg = "radius of -6.1 km gives a negative cell extent" with self.assertRaisesRegexp(ValueError, msg): - plugin.get_grid_x_y_kernel_ranges(cube, radius_in_km) + plugin._get_grid_x_y_kernel_ranges(cube, radius_in_km) def test_single_point_range_0(self): """Test behaviour with a non-zero point with zero range.""" @@ -288,7 +287,7 @@ def test_single_point_range_0(self): msg = "radius of 0.005 km gives zero cell extent" with self.assertRaisesRegexp(ValueError, msg): expected = np.zeros_like(cube.data) - plugin.get_grid_x_y_kernel_ranges(cube, radius_in_km) + plugin._get_grid_x_y_kernel_ranges(cube, radius_in_km) def test_single_point_range_lots(self): """Test behaviour with a non-zero point with unhandleable range.""" @@ -298,10 +297,10 @@ def test_single_point_range_lots(self): msg = "radius of 500000.0 km exceeds maximum grid cell extent" with self.assertRaisesRegexp(ValueError, msg): expected = np.zeros_like(cube.data) - plugin.get_grid_x_y_kernel_ranges(cube, radius_in_km) + plugin._get_grid_x_y_kernel_ranges(cube, radius_in_km) -class Test_apply_kernel_for_smoothing(IrisTest): +class Test__apply_kernel_for_smoothing(IrisTest): """Test neighbourhood processing plugin on the OS National Grid.""" @@ -312,7 +311,7 @@ def test_basic(self): cube = set_up_cube() plugin = NBHood(self.RADIUS_IN_KM) ranges = (3, 3) - result = plugin.apply_kernel_for_smoothing(cube, ranges) + result = plugin._apply_kernel_for_smoothing(cube, ranges) self.assertIsInstance(result, Cube) def test_single_point(self): @@ -323,7 +322,8 @@ def test_single_point(self): expected[0][5 + index][5:10] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_flat(self): @@ -343,7 +343,7 @@ def test_single_point_flat(self): result = ( NBHood( radius_in_km, - unweighted_mode=True).apply_kernel_for_smoothing( + unweighted_mode=True)._apply_kernel_for_smoothing( cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) @@ -360,7 +360,8 @@ def test_multi_point_multitimes(self): expected[1][5 + index][5:10] = slice_ ranges 
= (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_lat_long(self): @@ -370,7 +371,8 @@ def test_single_point_lat_long(self): expected = np.zeros_like(cube.data) ranges = (3, 3) with self.assertRaisesRegexp(ValueError, msg): - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges) def test_single_point_masked_to_null(self): """Test behaviour with a masked non-zero point. @@ -390,7 +392,8 @@ def test_single_point_masked_to_null(self): expected[time_index][5 + index][5:10] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_masked_other_point(self): @@ -409,7 +412,8 @@ def test_single_point_masked_other_point(self): expected[time_index][5 + index][5:10] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_1(self): @@ -419,7 +423,8 @@ def test_single_point_range_1(self): expected[0][7][7] = 0.0 radius_in_km = 2.1 # Equivalent to a range of 1 grid cell. ranges = (1, 1) - result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube, ranges) + result = NBHood(radius_in_km)._apply_kernel_for_smoothing( + cube, ranges) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_5(self): @@ -431,7 +436,8 @@ def test_single_point_range_5(self): expected[time_index][3 + index][3:12] = slice_ radius_in_km = 10.5 # Equivalent to a range of 5 grid cells. ranges = (5, 5) - result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube, ranges) + result = NBHood(radius_in_km)._apply_kernel_for_smoothing( + cube, ranges) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_range_5_small_domain(self): @@ -449,7 +455,7 @@ def test_single_point_range_5_small_domain(self): ]) radius_in_km = 10.5 # Equivalent to a range of 5 grid cells. 
ranges = (5, 5) - result = NBHood(radius_in_km).apply_kernel_for_smoothing(cube, ranges) + result = NBHood(radius_in_km)._apply_kernel_for_smoothing(cube, ranges) self.assertArrayAlmostEqual(result.data, expected) def test_point_pair(self): @@ -468,7 +474,8 @@ def test_point_pair(self): expected[0][5 + index][4:11] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_almost_edge(self): @@ -480,7 +487,8 @@ def test_single_point_almost_edge(self): expected[0][5 + index][0:5] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_adjacent_edge(self): @@ -492,7 +500,8 @@ def test_single_point_adjacent_edge(self): expected[0][5 + index][0:4] = slice_[1:] ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_on_edge(self): @@ -517,7 +526,8 @@ def test_single_point_on_edge(self): expected[0][5 + index][0:3] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_almost_corner(self): @@ -529,7 +539,8 @@ def test_single_point_almost_corner(self): expected[0][index][0:5] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_adjacent_corner(self): @@ -543,7 +554,8 @@ def test_single_point_adjacent_corner(self): expected[0][index - 1][0:4] = slice_[1:] ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) def test_single_point_on_corner(self): @@ -566,7 +578,8 @@ def test_single_point_on_corner(self): expected[0][index][0:3] = slice_ ranges = (3, 3) result = ( - NBHood(self.RADIUS_IN_KM).apply_kernel_for_smoothing(cube, ranges)) + NBHood(self.RADIUS_IN_KM)._apply_kernel_for_smoothing( + cube, ranges)) self.assertArrayAlmostEqual(result.data, expected) @@ -627,8 +640,7 @@ def test_fail_multiple_realisations(self): standard_name="realization"), 0) msg = "Does not operate across realizations" with self.assertRaisesRegexp(ValueError, msg): - plugin = ( - NBHood(self.RADIUS_IN_KM).process(cube)) + NBHood(self.RADIUS_IN_KM).process(cube) def test_radii_varying_with_lead_time(self): """ From f87c8a5778ad051545eee97e23d63fc4449b4d85 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 25 May 2017 14:15:32 +0100 Subject: [PATCH 0064/1367] Corrections to Command Line Interface tests. 
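The option handling these CLI tests cover can be reduced to the following
sketch; the argument names match the script, but the sample values and the
comma-separated encoding of the radii and lead times are illustrative
assumptions:

    import argparse

    parser = argparse.ArgumentParser(prog='improver-nbhood')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--radius-in-km', metavar='RADIUS', type=float)
    group.add_argument('--radii-in-km-by-lead-time',
                       metavar='RADIUS_BY_LEAD_TIME', nargs=2)

    # nargs=2 means the option consumes exactly two values: the radii and
    # the lead times at which those radii apply.
    args = parser.parse_args(
        ['--radii-in-km-by-lead-time', '10,20,30', '2,3,4'])

    if args.radius_in_km:
        radius_or_radii_in_km = args.radius_in_km
        lead_times = None
    elif args.radii_in_km_by_lead_time:
        radius_or_radii_in_km = args.radii_in_km_by_lead_time[0]
        lead_times = args.radii_in_km_by_lead_time[1]

    print(radius_or_radii_in_km)  # 10,20,30
    print(lead_times)             # 2,3,4

Supplying both options at once is rejected by the mutually exclusive group,
which is what the usage strings asserted in the bats tests below reflect.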
--- bin/improver-nbhood | 4 ++-- tests/improver-nbhood/00-null.bats | 8 ++++++-- tests/improver-nbhood/01-help.bats | 8 +++++++- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/bin/improver-nbhood b/bin/improver-nbhood index 257a5e8842..de9fe55cef 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -49,8 +49,8 @@ def main(): help='The kernel radius for neighbourhood processing') group.add_argument('--radii-in-km-by-lead-time', metavar='RADIUS_BY_LEAD_TIME', nargs=2, - help='The kernel radii for neighbourhood processing' - 'and the associated lead times at which the radii are' + help='The kernel radii for neighbourhood processing ' + 'and the associated lead times at which the radii are ' 'valid.') parser.add_argument('input_filepath', metavar='INPUT_FILE', help='A path to an input NetCDF file to be processed') diff --git a/tests/improver-nbhood/00-null.bats b/tests/improver-nbhood/00-null.bats index 9d414a1fbb..cb70ff45bb 100644 --- a/tests/improver-nbhood/00-null.bats +++ b/tests/improver-nbhood/00-null.bats @@ -3,7 +3,11 @@ @test "nbhood no arguments" { run improver nbhood [[ "$status" -eq 2 ]] - expected="usage: improver-nbhood [-h] [--radius-in-km RADIUS]\ - INPUT_FILE OUTPUT_FILE" + read -d '' expected <<'__TEXT__' || true +usage: improver-nbhood [-h] + [--radius-in-km RADIUS | --radii-in-km-by-lead-time \ +RADIUS_BY_LEAD_TIME RADIUS_BY_LEAD_TIME] + INPUT_FILE OUTPUT_FILE +__TEXT__ [[ "$output" =~ "$expected" ]] } diff --git a/tests/improver-nbhood/01-help.bats b/tests/improver-nbhood/01-help.bats index 26bfb62744..fc491781bd 100644 --- a/tests/improver-nbhood/01-help.bats +++ b/tests/improver-nbhood/01-help.bats @@ -4,7 +4,10 @@ run improver nbhood -h [[ "$status" -eq 0 ]] read -d '' expected <<'__HELP__' || true -usage: improver-nbhood [-h] [--radius-in-km RADIUS] INPUT_FILE OUTPUT_FILE +usage: improver-nbhood [-h] + [--radius-in-km RADIUS | --radii-in-km-by-lead-time \ +RADIUS_BY_LEAD_TIME RADIUS_BY_LEAD_TIME] + INPUT_FILE OUTPUT_FILE Apply basic weighted circle smoothing via the BasicNeighbourhoodProcessing plugin to a file with one cube. @@ -17,6 +20,9 @@ optional arguments: -h, --help show this help message and exit --radius-in-km RADIUS The kernel radius for neighbourhood processing + --radii-in-km-by-lead-time RADIUS_BY_LEAD_TIME RADIUS_BY_LEAD_TIME + The kernel radii for neighbourhood processing and the + associated lead times at which the radii are valid. __HELP__ [[ "$output" == "$expected" ]] } From 126c4d6f08c65aacf8734a63c9673d0b7a019aba Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Thu, 1 Jun 2017 08:35:00 +0100 Subject: [PATCH 0065/1367] Changes following 1st review. --- lib/improver/spotdata/ancillaries.py | 60 ++- lib/improver/spotdata/common_functions.py | 266 ++++++++-- lib/improver/spotdata/neighbour_finding.py | 465 ++++++------------ .../spotdata/tests/test_neighbour_finding.py | 202 +++++--- 4 files changed, 549 insertions(+), 444 deletions(-) diff --git a/lib/improver/spotdata/ancillaries.py b/lib/improver/spotdata/ancillaries.py index 79de822353..09e48fc9a4 100644 --- a/lib/improver/spotdata/ancillaries.py +++ b/lib/improver/spotdata/ancillaries.py @@ -39,39 +39,49 @@ def get_ancillary_data(diagnostics, ancillary_path): - ''' + """ Takes in a list of desired diagnostics and determines which ancillary (i.e. non-time dependent) fields are required given their neighbour finding or data extraction methods. 
Args: ----- - diagnostics: dictionary containing each diagnostic to be processed with - associated options for how they should be produced, e.g. - method of neighbour selection, method of data extraction etc. + diagnostics : dict + Dictionary containing each diagnostic to be processed with associated + options for how they should be produced, e.g. method of neighbour + selection, method of data extraction etc. Returns: -------- - ancillary_data: - dictionary containing named ancillary data; the key gives the - name and the item is the iris.cube.Cube of data. + ancillary_data : dict + Dictionary containing named ancillary data; the key gives the name and + the item is the iris.cube.Cube of data. - ''' + Raises: + ------- + IOError if required input files are not found. + + """ ancillary_data = {} - orography = Load('single_file').process( - ancillary_path + '/orography.nc', 'surface_altitude') + try: + orography = Load('single_file').process( + ancillary_path + '/orography.nc', 'surface_altitude') + except: + raise IOError('Orography file not found.') - ancillary_data.update({'orography': orography}) + ancillary_data['orography'] = orography # Check if the land mask is used for any diagnostics. - if any([('land' in diagnostics[key]['neighbour_finding']) + if any([(diagnostics[key]['neighbour_finding']['land_constraint']) for key in diagnostics.keys()]): + try: + land = Load('single_file').process( + ancillary_path + '/land_mask.nc', 'land_binary_mask') + except: + raise IOError('Land mask file not found.') - land = Load('single_file').process( - ancillary_path + '/land_mask.nc', 'land_binary_mask') - - ancillary_data.update({'land': land}) + ancillary_data['land_mask'] = land return ancillary_data @@ -80,27 +90,31 @@ def get_ancillary_data(diagnostics, ancillary_path): # raises an exception if it is missing. def data_from_ancillary(ancillary_data, key): - ''' + """ Check for an iris.cube.Cube of information in the ancillary data dictionary. Args: ----- - ancillary_data : ancillary_data dictionary defined by get_ancillary_data - function. - key : name of ancillary field requested. + ancillary_data : dict + Dictionary defined by get_ancillary_data function that contains + iris.cube.Cube ancillary data. + + key : string + Name of ancillary field requested. Returns: -------- - iris.cube.Cube.data from the . + data : numpy.array + Ancillary data array extracted from iris.cube.Cube. Raises: ------- Exception if the cube has not been loaded. - ''' + """ - if ancillary_data is not None and ancillary_data[key]: + if key in ancillary_data.keys(): return ancillary_data[key].data else: raise Exception('Ancillary data {} has not been loaded.'.format(key)) diff --git a/lib/improver/spotdata/common_functions.py b/lib/improver/spotdata/common_functions.py index 6fecfaa622..b41fbfbf71 100644 --- a/lib/improver/spotdata/common_functions.py +++ b/lib/improver/spotdata/common_functions.py @@ -40,32 +40,38 @@ class ConditionalListExtract(object): - ''' + """ Performs a numerical comparison, the type selected with method, of data in an array and returns an array of indices in that data array that fulfill the comparison. Args: ----- - method : which comparison to make, e.g. not_equal_to. - data : array of values to be filtered. - indices_list : list of indices in the data array that should be - considered. - comparison_value: the value against which numbers in data are to be - compared. + method : string + Which comparison to make, e.g. not_equal_to. + + data : numpy.array + Array of values to be filtered. 
+
+    indices_list : list
+        Indices in the data array that should be considered.
+
+    comparison_value : float
+        Value against which numbers in data are to be compared.
 
     Returns:
     --------
-    array_of_indices.tolist():
-        a list of the the indices of data values that fulfill the
-        comparison condition.
-    '''
+    array_of_indices.tolist(): list
+        A list of the indices of data values that fulfill the comparison
+        condition.
+
+    """
 
     def __init__(self, method):
         self.method = method
 
     def process(self, data, indices_list, comparison_value):
-        ''' Call the data comparison method passed in'''
+        """ Call the data comparison method passed in"""
         array_of_indices = np.array(indices_list)
         function = getattr(self, self.method)
         subset = function(data, array_of_indices, comparison_value)
@@ -74,7 +80,7 @@ def process(self, data, indices_list, comparison_value):
 
     @staticmethod
     def less_than(data, array_of_indices, comparison_value):
-        ''' Return indices of array for which value < comparison_value '''
+        """ Return indices of array for which value < comparison_value """
         return np.where(
             data[[array_of_indices[0], array_of_indices[1]]] <
             comparison_value
@@ -82,7 +88,7 @@ def less_than(data, array_of_indices, comparison_value):
 
     @staticmethod
     def greater_than(data, array_of_indices, comparison_value):
-        ''' Return indices of array for which value > comparison_value '''
+        """ Return indices of array for which value > comparison_value """
         return np.where(
             data[[array_of_indices[0], array_of_indices[1]]] >
             comparison_value
@@ -90,7 +96,7 @@ def greater_than(data, array_of_indices, comparison_value):
 
     @staticmethod
     def equal_to(data, array_of_indices, comparison_value):
-        ''' Return indices of array for which value == comparison_value '''
+        """ Return indices of array for which value == comparison_value """
         return np.where(
             data[[array_of_indices[0], array_of_indices[1]]] ==
             comparison_value
@@ -98,17 +104,14 @@ def equal_to(data, array_of_indices, comparison_value):
 
     @staticmethod
     def not_equal_to(data, array_of_indices, comparison_value):
-        ''' Return indices of array for which value != comparison_value '''
+        """ Return indices of array for which value != comparison_value """
         return np.where(
             data[[array_of_indices[0], array_of_indices[1]]] !=
             comparison_value
         )
 
-
-# Common shared functions
-
 def nearest_n_neighbours(i, j, no_neighbours, exclude_self=False):
-    '''
+    """
     Returns a coordinate list of n points comprising the original coordinate
     (i,j) plus the n-1 neighbouring points on a cartesian grid. e.g. n = 9
 
     Args:
     -----
-    i, j : central coordinate about which to find neighbours.
-    no_neighbours : no. of neighbours to return (9, 25, 49, etc).
-    exclude_self : boolean, if True, (i,j) excluded from returned list.
+    i, j : ints
+        Central coordinate about which to find neighbours.
+
+    no_neighbours : int
+        No. of neighbours to return (9, 25, 49, etc).
+
+    exclude_self : boolean
+        If True, the central coordinate (i,j) is excluded from returned list.
 
     Returns:
     --------
-    list of array indices that neighbour the central (i,j) point.
+    Array of neighbouring indices : numpy.array
 
-    '''
+    """
     # Check n is a valid no. for neighbour finding.
     root_no_neighbours = np.sqrt(no_neighbours)
     delta_neighbours = (root_no_neighbours - 1)/2
     if not np.mod(delta_neighbours, 1) == 0 or delta_neighbours < 1:
         raise ValueError(
-            'Invalid neareat no. of neighbours request. N={} is not a valid '
+            'Invalid nearest no. of neighbours request.
N={} is not a valid '
            'square no. (sqrt N must be odd)'.format(no_neighbours))
     delta_neighbours = int(delta_neighbours)
@@ -156,7 +164,7 @@ def nearest_n_neighbours(i, j, no_neighbours, exclude_self=False):
 
 def node_edge_test(node_list, cube):
-    '''
+    """
     Node lists produced using the nearest_n_neighbours function may overspill
     the domain of the array from which data is to be extracted. This function
     checks whether the cube of interest is a global domain with a wrapped
     boundary, with points beyond the edge of the domain either changed or
     discarded as appropriate.
 
     Args
     ----
-    node_list : list[[i],[j]] of indices.
-    cube : the cube for which data will be extracted using the
-           indices (e.g. cube.data[node_list]).
+    node_list : list
+        List of indices with a structure [[i],[j]].
+    cube : iris.cube.Cube
+        A cube containing the grid from which the i,j coordinates have been
+        selected, and which will be used to determine if these points fall
+        on the edge of the domain.
 
     Returns
     -------
-    node_list : modified node_list with points beyond the cube boundary
-                either changed or discarded as appropriate.
+    node_list : list
+        Modified node_list with points beyond the cube boundary either changed
+        or discarded as appropriate.
 
-    '''
+    """
 
     node_list = np.array(node_list)
 
     return node_list.tolist()
 
 
-def get_nearest_coords(cube, latitude, longitude):
-    '''
+def get_nearest_coords(cube, latitude, longitude, iname, jname):
+    """
     Uses the iris cube method nearest_neighbour_index to find the nearest grid
     points to a given latitude-longitude position.
 
-    '''
-    i_latitude = (cube.coord(axis='y').nearest_neighbour_index(latitude))
-    j_longitude = (cube.coord(axis='x').nearest_neighbour_index(longitude))
+    Args:
+    -----
+    cube : iris.cube.Cube
+        Cube containing a representative grid.
+
+    latitude/longitude : floats
+        Latitude/longitude coordinates of spot data site of interest.
+
+    iname/jname : strings
+        Strings giving the names of the y/x coordinates to be searched.
+
+    Returns:
+    --------
+    i_latitude/j_longitude : ints
+        Grid coordinates of the nearest grid point to the spot data site.
+
+    """
+    i_latitude = cube.coord(iname).nearest_neighbour_index(latitude)
+    j_longitude = cube.coord(jname).nearest_neighbour_index(longitude)
     return i_latitude, j_longitude
 
 
 def index_of_minimum_difference(whole_list, subset_list=None):
-    '''
+    """
     Returns the index of the minimum value in a list.
-    '''
+
+    Args:
+    -----
+    whole_list : numpy.array
+        Array to be searched for a minimum value.
+
+    subset_list : numpy.array/None
+        Array of indices to include in the search. If None the entirety of
+        whole_list is searched.
+
+    Returns:
+    --------
+    Index of the minimum value in whole_list.
+
+    """
     if subset_list is None:
         subset_list = np.arange(len(whole_list))
     return subset_list[np.argmin(abs(whole_list[subset_list]))]
 
 
 def list_entry_from_index(list_in, index_in):
-    '''
+    """
     Extracts index_in element from each list in a list of lists, and returns
     as a list.
     e.g.
     list_in = [[0,1,2],[5,6,7],[8,9,10]]
     index_in = 1
     Returns [1,6,9]
-    '''
-    n_columns = len(list_in)
-    return [list_in[n_col][index_in] for n_col in range(n_columns)]
+
+    """
+    return list(zip(*list_in)[index_in])
 
 
 def datetime_constraint(time_in):
-    '''
+    """
     Constructs an iris equivalence constraint from a python datetime object.
 
+    Args:
+    -----
+    time_in : datetime.datetime object
+        The time to be used to build an iris constraint.
+
+    Returns:
+    --------
+    iris.Constraint
+        An iris constraint to be used in extracting data at the given time
+        from a cube.
+
+    """
     return Constraint(
-        time=PartialDateTime(*[int(time_in.strftime("%{}".format(x)))
-                               for x in
-                               ['Y', 'm', 'd', 'H']])
-        )
+        time=PartialDateTime(time_in.year, time_in.month,
+                             time_in.day, time_in.hour))
+
+
+def construct_neighbour_hash(neighbour_finding):
+    """
+    Constructs a hash from the various neighbour finding options. This is used
+    to avoid repeating the same neighbour search more than once.
+
+    Args:
+    -----
+    neighbour_finding : dict
+        A dictionary containing the method, vertical_bias, and land_constraint
+        options for neighbour finding.
+
+    Returns:
+    --------
+    neighbour_hash : string
+        A concatenated string of the options
+        e.g. 'fast_nearest_neighbour-None-False'
+
+    """
+    return '{}-{}-{}'.format(neighbour_finding['method'],
+                             neighbour_finding['vertical_bias'],
+                             neighbour_finding['land_constraint'])
+
+
+def apply_bias(vertical_bias, dzs):
+    """
+    Bias neighbour selection to look for grid points with an
+    altitude that is above or below the site if vertical_bias is
+    not None.
+
+    Args:
+    -----
+    vertical_bias : string/None
+        Sets the preferred vertical displacement of the grid point
+        relative to the site; above/below/None.
+
+    dzs : numpy.array
+        Array of vertical displacements calculated as the subtraction of grid
+        orography altitudes from spot site altitudes.
+
+    Returns:
+    --------
+    dz_subset : numpy.array
+        Indices of grid points that satisfy the bias condition if any are
+        available, otherwise it returns the whole set.
+
+    """
+    if vertical_bias == 'above':
+        dz_subset, = np.where(dzs <= 0)
+    elif vertical_bias == 'below':
+        dz_subset, = np.where(dzs >= 0)
+
+    if (vertical_bias is None or len(dz_subset) == 0
+            or len(dz_subset) == len(dzs)):
+        dz_subset = np.arange(len(dzs))
+
+    return dz_subset
+
+
+def xy_test(cube):
+    """
+    Test whether a diagnostic cube is on a latitude/longitude grid or uses an
+    alternative projection.
+
+    Args:
+    -----
+    cube : iris.cube.Cube
+        A diagnostic cube to examine for coordinate system.
+
+    Returns:
+    --------
+    trg_crs : cartopy.crs/None
+        Coordinate system of the diagnostic cube in a cartopy format unless it
+        is already a latitude/longitude grid, in which case None is returned.
+
+    """
+    trg_crs = None
+    if (not cube.coord(axis='x').name() == 'longitude' or
+            not cube.coord(axis='y').name() == 'latitude'):
+        trg_crs = cube.coord_system().as_cartopy_crs()
+    return trg_crs
+
+
+def xy_transform(trg_crs, latitude, longitude):
+    """
+    Transforms latitude/longitude coordinate pairs from a latitude/longitude
+    grid into an alternative projection defined by trg_crs.
+
+    Args:
+    -----
+    trg_crs : cartopy.crs/None
+        Target coordinate system in cartopy format or None.
+
+    latitude : float
+        Latitude coordinate.
+
+    longitude : float
+        Longitude coordinate.
+
+    Returns:
+    --------
+    x, y : floats
+        Longitude and latitude transformed into the target coordinate system.
+
+    """
+    if trg_crs is None:
+        return longitude, latitude
+    else:
+        return trg_crs.transform_point(longitude, latitude,
+                                       ccrs.PlateCarree())
+
+
+def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
+    """
+    Floating point comparison for nearly equal.
+
+    """
    return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
diff --git a/lib/improver/spotdata/neighbour_finding.py b/lib/improver/spotdata/neighbour_finding.py
index 5c2b73a6a9..4a53ffa123 100644
--- a/lib/improver/spotdata/neighbour_finding.py
+++ b/lib/improver/spotdata/neighbour_finding.py
@@ -33,14 +33,14 @@
 import numpy as np
 import cartopy.crs as ccrs
-# from iris.analysis.trajectory import interpolate
 from improver.spotdata.ancillaries import data_from_ancillary
 from improver.spotdata.common_functions import (ConditionalListExtract,
                                                 nearest_n_neighbours,
                                                 get_nearest_coords,
                                                 index_of_minimum_difference,
                                                 list_entry_from_index,
-                                                node_edge_test)
+                                                node_edge_test, apply_bias,
+                                                xy_test, xy_transform, isclose)
 
 
 class PointSelection(object):
     """
     For the selection of source data from a grid for use in deriving
     conditions at an arbitrary coordinate.
 
+    Methods available for determining the neighbours are:
+
+    fast_nearest_neighbour: Closest neighbouring grid point to spot site
+                            calculated on a 2D plane (lat/lon).
+
+    minimum_height_error_neighbour
+                          : This method uses the nearest neighbour as a
+                            starting point but then loosens this constraint
+                            to minimise the vertical displacement between
+                            the spot site and grid points.
+
     """
 
-    def __init__(self, method='default'):
-        """neighbour_list = find_nearest_neighbours(cube, spot_sites)
+    def __init__(self, method='fast_nearest_neighbour',
+                 vertical_bias=None, land_constraint=False):
+        """
         The class is called with the desired method to be used in determining
-        the grid point closest to sites of interest.
+        the grid points closest to sites of interest.
+
+        Args:
+        -----
+        method : string
+            Name of the method of neighbour finding to be used.
+
+        vertical_bias : string/None
+            Sets the preferred vertical displacement bias of the grid point
+            relative to the site; above/below/None. If this criterion cannot
+            be met (e.g. bias below, but all grid points above site) the
+            smallest vertical displacement neighbour will be returned.
+
+        land_constraint : boolean
+            If True spot data sites on land should only select neighbouring
+            grid points also over land.
 
         """
         self.method = method
+        self.vertical_bias = vertical_bias
+        self.land_constraint = land_constraint
 
-    def process(self, cube, sites, **kwargs):
+    def process(self, cube, sites, ancillary_data,
+                default_neighbours=None, no_neighbours=9):
         """
-        Call the correct function to enact the method of PointSelection
-        specified.
+        Call the selected method for determining neighbouring grid points
+        after preparing the necessary diagnostics to be passed in.
+
+        Args:
+        -----
+        cube : iris.cube.Cube
+            Cube of gridded data of a diagnostic; the diagnostic is
+            unimportant as long as the grid is structured in the same way as
+            those from which data will be extracted using the neighbour list.
+
+        sites : OrderedDict
+            Site data, including latitude/longitude and altitude information.
+            e.g. {<site_id>: {'latitude': 50, 'longitude': 0, 'altitude': 10}}
+
+        ancillary_data : dict
+            Dictionary of ancillary (time invariant) model data that is
+            needed. e.g. {'orography': <iris.cube.Cube>}
+
+        default_neighbours/no_neighbours : see minimum_height_error_neighbour()
+            below.
+
+        Returns:
+        --------
+        neighbours : numpy.dtype (fields: i, j, dz, edgepoint)
+            Array of grid i,j coordinates that are nearest to each site
+            coordinate given. Includes vertical displacement between site and
+            returned grid point if orography is provided. Edgepoint is a
Edgepoint is a + boolean that indicates if the chosen grid point neighbour is on the + edge of the domain for a circular (e.g. global cylindrical) grid. """ - function = getattr(self, self.method) - return function(cube, sites, **kwargs) + if self.method == 'fast_nearest_neighbour': + if 'orography' in ancillary_data.keys(): + orography = data_from_ancillary(ancillary_data, 'orography') + else: + orography = None + return self.fast_nearest_neighbour(cube, sites, + orography=orography) + elif self.method == 'minimum_height_error_neighbour': + orography = data_from_ancillary(ancillary_data, 'orography') + + land_mask = None + if self.land_constraint: + land_mask = data_from_ancillary(ancillary_data, 'land_mask') + + return self.minimum_height_error_neighbour( + cube, sites, orography, land_mask=land_mask, + default_neighbours=default_neighbours, + no_neighbours=no_neighbours) + else: + # Should not make it here unless an unknown method is passed in. + raise AttributeError('Unknown method "{}" passed to {}.'.format( + self.method, self.__class__.__name__)) @staticmethod - def fast_nearest_neighbour(cube, sites, ancillary_data=None): - ''' + def fast_nearest_neighbour(cube, sites, orography=None): + """ Use iris coord.nearest_neighbour_index function to locate the nearest grid point to the given latitude/longitude pair. @@ -78,40 +154,33 @@ def fast_nearest_neighbour(cube, sites, ancillary_data=None): neighbour search with projection onto a spherical surface; this is typically much slower. + Args: ----- - cube : Iris cube of gridded data. - sites : Dictionary of site data, including lat/lon and - altitude information. - e.g. {: {'latitude': 50, 'longitude': 0, - 'altitude': 10}} - ancillary_data : A dictionary containing additional model data that - is needed. e.g. {'orography': } - + cube/sites : See process() above. + + orography : numpy.array + Array of orography data extracted from an iris.cube.Cube that + corresponds to the grids on which all other input diagnostics + will be provided (iris.cube.Cube.data). + Returns: -------- - neighbours: Numpy array of grid i,j coordinates that are nearest to - each site coordinate given. Includes height difference - between site and returned grid point if orography is - provided. - - ''' - if ancillary_data is not None and ancillary_data['orography']: - calculate_dz = True - orography = data_from_ancillary(ancillary_data, 'orography') - else: - calculate_dz = False + neighbours: See process() above. + """ neighbours = np.empty(len(sites), dtype=[('i', 'i8'), ('j', 'i8'), ('dz', 'f8'), - ('edge', 'bool_')]) + ('edgepoint', 'bool_')]) # Check cube coords are lat/lon, else transform lookup coordinates. trg_crs = xy_test(cube) imax = cube.coord(axis='y').shape[0] jmax = cube.coord(axis='x').shape[0] + iname = cube.coord(axis='y').name() + jname = cube.coord(axis='x').name() for i_site, site in enumerate(sites.itervalues()): latitude, longitude, altitude = (site['latitude'], @@ -119,11 +188,10 @@ def fast_nearest_neighbour(cube, sites, ancillary_data=None): site['altitude']) longitude, latitude = xy_transform(trg_crs, latitude, longitude) - i_latitude, j_longitude = get_nearest_coords(cube, latitude, - longitude) - + i_latitude, j_longitude = get_nearest_coords( + cube, latitude, longitude, iname, jname) dz_site_grid = 0. 
- if calculate_dz: + if orography is not None: dz_site_grid = altitude - orography[i_latitude, j_longitude] neighbours[i_site] = (int(i_latitude), int(j_longitude), @@ -132,100 +200,19 @@ def fast_nearest_neighbour(cube, sites, ancillary_data=None): return neighbours -# @staticmethod -# def nearest_neighbour(cube, sites, ancillary_data=None): -# ''' -# Uses the -# iris.analysis._interpolate_private._nearest_neighbour_indices_ndcoords -# function to locate the nearest grid point to the given latitude/ -# longitude pair, taking into account the projection of the cube. -# -# Method is equivalent to extracting data directly with -# iris.analysis.trajectory.interpolate method, which calculates nearest -# neighbours using great arcs on a spherical surface. Using the private -# function we are able to get the list of indices for reuse by multiple -# diagnostics. -# -# Args: -# ----- -# cube : Iris cube of gridded data. -# sites : Dictionary of site data, including lat/lon and -# altitude information. -# ancillary_data : A dictionary containing additional model data that -# is needed. e.g. {'orography': } -# -# Returns: -# -------- -# neighbours: Numpy array of grid i,j coordinates that are nearest to -# each site coordinate given. Includes height difference -# between site and returned grid point if orography is -# provided. -# -# ''' -# if ancillary_data is not None and ancillary_data['orography']: -# calculate_dz = True -# orography = data_from_ancillary(ancillary_data, 'orography') -# else: -# calculate_dz = False -# -# neighbours = np.empty(len(sites), dtype=[('i', 'i8'), -# ('j', 'i8'), -# ('dz', 'f8')]) -# -# # Check cube coords are lat/lon, else transform lookup coordinates. -# trg_crs = xy_test(cube) -# -# spot_sites = [('latitude', -# [sites[key]['latitude'] for key in sites.keys()]), -# ('longitude', -# [sites[key]['longitude'] for key in sites.keys()])] -# -# spot_orography = interpolate(cube, spot_sites, method='nearest') -# -# cube_lats = cube.coord(axis='y').points -# spot_lats = spot_orography.coord('latitude').points -# -# cube_lons = cube.coord(axis='x').points -# spot_lons = spot_orography.coord('longitude').points -# -# int_ind_i = [] -# int_ind_j = [] -# for point in spot_lats: -# indices_lat = (np.where(point == cube_lats)[0][0]) -# int_ind_i.append(indices_lat) -# for point in spot_lons: -# indices_lon = (np.where(point == cube_lons)[0][0]) -# int_ind_j.append(indices_lon) -# i_indices = int_ind_i -# j_indices = int_ind_j -# -# # i_indices, j_indices = zip(*[(i, j) for _, i, j in neighbour_list]) -# -# dz = [0] * len(neighbour_list) -# if calculate_dz: -# altitudes = [sites[key]['altitude'] for key in sites.keys()] -# dz = altitudes - orography[i_indices, j_indices] -# -# neighbours['i'] = i_indices -# neighbours['j'] = j_indices -# neighbours['dz'] = dz -# -# return neighbours - - def minimum_height_error_neighbour(self, cube, sites, + def minimum_height_error_neighbour(self, cube, sites, orography, + land_mask=None, default_neighbours=None, - relative_z=None, - land_constraint=False, - ancillary_data=None): - - ''' + no_neighbours=9): + """ Find the horizontally nearest neighbour, then relax the conditions - to find the neighbouring point in the 9 nearest nodes to the input - coordinate that minimises the height difference. This is typically - used for temperature, where vertical displacement can be much more - important that horizontal displacement in determining the conditions. 
+        to find the neighbouring point in the "no_neighbours" nearest nodes to
+        the input coordinate that minimises the height difference. This is
+        typically used for temperature, where vertical displacement can be much
+        more important than horizontal displacement in determining the
+        conditions.
 
-        A vertical displacement bias may be applied with the relative_z
+        A vertical displacement bias may be applied with the vertical_bias
         keyword; whether to prefer grid points above or below the site, or
         neither.
 
@@ -238,208 +225,84 @@ def minimum_height_error_neighbour(self, cube, sites,
 
         Args:
         -----
-        cube : Iris cube of gridded data.
-        sites : Dictionary of site data, including lat/lon and
-                altitude information.
-        relative_z : Sets the preferred vertical displacement of the grid
-                     point relative to the site; above/below/None.
-        land_constraint: A boolean that determines if land sites should only
-                         select from grid points also over land.
-        ancillary_data : A dictionary containing additional model data that
-                         is needed.
-                         Must contain {'orography': }.
-                         Needs {'land': } if using land
-                         constraint.
+        cube/sites : See process() above.
+
+        default_neighbours : numpy.array
+            An existing list of neighbours from which variations are made using
+            specified options (e.g. land_constraint). If unset, the
+            fast_nearest_neighbour method will be used to build this list.
+
+        orography : numpy.array
+            Array of orography data extracted from an iris.cube.Cube that
+            corresponds to the grids on which all other input diagnostics
+            will be provided.
+
+        land_mask : numpy.array
+            Array of land_mask data extracted from an iris.cube.Cube that
+            corresponds to the grids on which all other input diagnostics
+            will be provided.
+
+        no_neighbours : int
+            Number of grid points about the site to consider when relaxing the
+            nearest neighbour condition. If unset, this defaults to 9;
+            e.g. consider a 5x5 grid of points -> no_neighbours = 25.
 
         Returns:
         --------
-        neighbours: Numpy array of grid i,j coordinates that are nearest to
-                    each site coordinate given. Includes height difference
-                    between site and returned grid point.
+        neighbours: See process() above.
+
+        """
-        '''
         # Use the default nearest neighbour list as a starting point, and
         # if for some reason it is missing, recreate the list using the fast
         # method.
         if default_neighbours is None:
-            neighbour_list = self.fast_nearest_neighbour(cube, sites,
-                                                         ancillary_data)
+            neighbours = self.fast_nearest_neighbour(cube, sites,
+                                                     orography=orography)
         else:
-            neighbour_list = default_neighbours
-
-        orography = data_from_ancillary(ancillary_data, 'orography')
-        if land_constraint:
-            land = data_from_ancillary(ancillary_data, 'land')
+            neighbours = default_neighbours
 
         for i_site, site in enumerate(sites.itervalues()):
+            altitude = site['altitude']
-            i, j = neighbour_list['i'][i_site], neighbour_list['j'][i_site]
-            edgecase = neighbour_list['edge'][i_site]
+            i, j, dz_nearest = (neighbours['i'][i_site],
+                                neighbours['j'][i_site],
+                                neighbours['dz'][i_site])
+            edgepoint = neighbours['edgepoint'][i_site]
 
-            node_list = nearest_n_neighbours(i, j, 9)
-            if edgecase:
+            node_list = nearest_n_neighbours(i, j, no_neighbours)
+            if edgepoint:
                 node_list = node_edge_test(node_list, cube)
 
-            if land_constraint:
+            if self.land_constraint:
                 # Check that we are considering a land point and that at least
                 # one neighbouring point is also land. If not no modification
                 # is made to the nearest neighbour coordinates.
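The land-constraint check here reduces to masking the candidate node list against the land/sea field. A minimal standalone sketch, with invented node_i/node_j index arrays and a 0/1 land_mask purely for illustration (the plugin itself delegates this filtering to ConditionalListExtract):

    import numpy as np

    # Hypothetical 0/1 land-sea mask: rows 2-4 are land, rows 0-1 are sea.
    land_mask = np.zeros((5, 5))
    land_mask[2:, :] = 1

    # Hypothetical candidate neighbourhood, as separate i and j index arrays.
    node_i = np.array([1, 2, 3])
    node_j = np.array([2, 2, 2])

    # Keep only the candidates that fall on land points (land_mask != 0).
    keep = land_mask[node_i, node_j] != 0
    node_i, node_j = node_i[keep], node_j[keep]
    print(node_i)  # [2 3]
    print(node_j)  # [2 2]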
- exclude_self = nearest_n_neighbours(i, j, 9, exclude_self=True) - if edgecase: + exclude_self = nearest_n_neighbours(i, j, no_neighbours, + exclude_self=True) + if edgepoint: exclude_self = node_edge_test(exclude_self, cube) - if not land[i, j] or not any(land[exclude_self]): + if not land_mask[i, j] or not any(land_mask[exclude_self]): continue + # Filter the node_list to keep only land points + # (land_mask == 1). node_list = ConditionalListExtract('not_equal_to').process( - land, node_list, 0) + land_mask, node_list, 0) - dz_nearest = abs(altitude - orography[i, j]) dzs = altitude - orography[node_list] - - dzs, dz_nearest, dz_subset = apply_bias( - relative_z, dzs, dz_nearest, altitude, orography, i, j) + dz_subset = apply_bias(self.vertical_bias, dzs) ij_min = index_of_minimum_difference(dzs, subset_list=dz_subset) i_min, j_min = list_entry_from_index(node_list, ij_min) dz_min = abs(altitude - orography[i_min, j_min]) - if dz_min < dz_nearest: - neighbour_list[i_site] = i_min, j_min, dzs[ij_min], edgecase - - return neighbour_list - -# Wrapper routines to use the dz minimisation routine with various options. -# These can be called as methods and set in the diagnostic configs. -# It may be better to simply use the keyword options at a higher level, -# but that will make the config more complex. - - def min_dz_no_bias(self, cube, sites, **kwargs): - ''' Return local grid neighbour with minimum vertical displacement''' - return self.minimum_height_error_neighbour(cube, sites, - relative_z=None, - **kwargs) - - def min_dz_biased_above(self, cube, sites, **kwargs): - ''' - Return local grid neighbour with minimum vertical displacement, - biased to select grid points above the site altitude. - - ''' - return self.minimum_height_error_neighbour(cube, sites, - relative_z='above', - **kwargs) - - def min_dz_biased_below(self, cube, sites, **kwargs): - ''' - Return local grid neighbour with minimum vertical displacement, - biased to select grid points below the site altitude. - - ''' - return self.minimum_height_error_neighbour(cube, sites, - relative_z='below', - **kwargs) - - def min_dz_land_no_bias(self, cube, sites, **kwargs): - ''' - Return local grid neighbour with minimum vertical displacement. - Require land point neighbour if site is a land point. - - ''' - return self.minimum_height_error_neighbour(cube, sites, - relative_z=None, - land_constraint=True, - **kwargs) - - def min_dz_land_biased_above(self, cube, sites, **kwargs): - ''' - Return local grid neighbour with minimum vertical displacement, - biased to select grid points above the site altitude. - Require land point neighbour if site is a land point. - - ''' - return self.minimum_height_error_neighbour(cube, sites, - relative_z='above', - land_constraint=True, - **kwargs) - - def min_dz_land_biased_below(self, cube, sites, **kwargs): - ''' - Return local grid neighbour with minimum vertical displacement, - biased to select grid points below the site altitude. - Require land point neighbour if site is a land point. - - ''' - return self.minimum_height_error_neighbour(cube, sites, - relative_z='below', - land_constraint=True, - **kwargs) - - -def apply_bias(relative_z, dzs, dz_nearest, altitude, orography, i, j): - ''' - Bias neighbour selection to look for grid points with an - altitude that is above or below the site if relative_z is - not None. 
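For a concrete picture of the rewritten bias handling, the new module-level apply_bias (its body reproduced from the common_functions.py hunk earlier in this patch) can be exercised with invented displacement values:

    import numpy as np

    def apply_bias(vertical_bias, dzs):
        # Copied from the patched common_functions.apply_bias.
        if vertical_bias == 'above':
            dz_subset, = np.where(dzs <= 0)
        elif vertical_bias == 'below':
            dz_subset, = np.where(dzs >= 0)
        if (vertical_bias is None or len(dz_subset) == 0
                or len(dz_subset) == len(dzs)):
            dz_subset = np.arange(len(dzs))
        return dz_subset

    # dzs = site altitude - grid orography, so dzs <= 0 means the grid
    # point sits at or above the site.
    dzs = np.array([5., -3., 2., -8.])
    print(apply_bias('above', dzs))  # [1 3] -> only points above the site
    print(apply_bias('below', dzs))  # [0 2] -> only points below the site
    print(apply_bias('above', np.array([4., 2.])))  # [0 1] -> fallback to all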
- - ''' - if relative_z == 'above': - dz_subset, = np.where(dzs <= 0) - if dz_nearest > 0: - dz_nearest = 1.E6 - elif relative_z == 'below': - dz_subset, = np.where(dzs >= 0) - if dz_nearest < 0: - dz_nearest = 1.E6 - - if relative_z is None or len(dz_subset) == 0 or len(dz_subset) == len(dzs): - dz_subset = np.arange(len(dzs)) - dz_nearest = abs(altitude - orography[i, j]) - - return dzs, dz_nearest, dz_subset - - -def xy_test(cube): - ''' - Test whether a diagnostic cube is on a latitude/longitude grid or uses an - alternative projection. - - Args: - ----- - cube : A diagnostic cube to test. - - Returns: - -------- - trg_crs : None if the cube data is on a latitude/longitude grid. Otherwise - trg_crs is the coordinate system in a cartopy format. - ''' - trg_crs = None - if (not cube.coord(axis='x').name() == 'longitude' or - not cube.coord(axis='y').name() == 'latitude'): - trg_crs = cube.coord_system().as_cartopy_crs() - return trg_crs - - -def xy_transform(trg_crs, latitude, longitude): - ''' - Transforms latitude/longitude coordinate pairs from a latitude/longitude - grid into an alternative projection defined by trg_crs. - - Args: - ----- - trg_crs : Target coordinate system in cartopy format. - latitude : Latitude coordinate. - longitude : Longitude coordinate. - - Returns: - -------- - x, y : longitude and latitude transformed into the target coordinate - system. - - ''' - if trg_crs is None: - return longitude, latitude - else: - return trg_crs.transform_point(longitude, latitude, - ccrs.PlateCarree()) + # Test to ensure that if multiple vertical displacements are the + # same we don't select a more distant point because of array + # ordering. + if not isclose(dz_min, abs(dz_nearest)): + neighbours[i_site] = i_min, j_min, dzs[ij_min], edgepoint + + return neighbours diff --git a/lib/improver/spotdata/tests/test_neighbour_finding.py b/lib/improver/spotdata/tests/test_neighbour_finding.py index 42e81bc266..9828d24128 100644 --- a/lib/improver/spotdata/tests/test_neighbour_finding.py +++ b/lib/improver/spotdata/tests/test_neighbour_finding.py @@ -69,7 +69,7 @@ def setUp(self): ancillary_data = {} ancillary_data.update({'orography': orography}) - ancillary_data.update({'land': land}) + ancillary_data.update({'land_mask': land}) sites = OrderedDict() sites.update({'100': {'latitude': 50, @@ -82,40 +82,78 @@ def setUp(self): neighbour_list = np.empty(1, dtype=[('i', 'i8'), ('j', 'i8'), ('dz', 'f8'), - ('edge', 'bool_')]) + ('edgepoint', 'bool_')]) self.cube = cube self.ancillary_data = ancillary_data self.sites = sites self.neighbour_list = neighbour_list - def return_types(self, method): + def return_types(self, method, vertical_bias=None, land_constraint=False): """Test that the plugin returns a numpy array.""" - plugin = PointSelection(method) - result = plugin.process(self.cube, self.sites, - ancillary_data=self.ancillary_data) + plugin = PointSelection(method, vertical_bias, land_constraint) + result = plugin.process(self.cube, self.sites, self.ancillary_data) self.assertIsInstance(result, np.ndarray) self.assertEqual(result.dtype, self.neighbour_list.dtype) - def correct_neighbour(self, method, i_expected, j_expected, dz_expected): + def correct_neighbour(self, method, i_expected, j_expected, dz_expected, + vertical_bias=None, land_constraint=False): """Test that the plugin returns the expected neighbour""" - plugin = PointSelection(method) - result = plugin.process(self.cube, self.sites, - ancillary_data=self.ancillary_data) + plugin = PointSelection(method, vertical_bias, 
land_constraint)
+        result = plugin.process(self.cube, self.sites, self.ancillary_data)
         self.assertEqual(result['i'], i_expected)
         self.assertEqual(result['j'], j_expected)
         self.assertEqual(result['dz'], dz_expected)
 
-    def without_ancillary_data(self, method):
+    def without_ancillary_data(self, method, vertical_bias=None, land_constraint=False):
         """Test plugins behaviour with no ancillary data provided"""
-        plugin = PointSelection(method)
+        plugin = PointSelection(method, vertical_bias, land_constraint)
         if method == 'fast_nearest_neighbour':
-            result = plugin.process(self.cube, self.sites)
+            result = plugin.process(self.cube, self.sites, {})
             self.assertIsInstance(result, np.ndarray)
         else:
             msg = 'Ancillary data'
             with self.assertRaisesRegexp(Exception, msg):
-                result = plugin.process(self.cube, self.sites)
+                result = plugin.process(self.cube, self.sites, {})
+
+
+class miscellaneous(TestNeighbourFinding):
+    def test_invalid_method(self):
+        """
+        Test that the plugin can handle an invalid method being passed in.
+
+        """
+        plugin = PointSelection('smallest distance')
+        msg = 'Unknown method'
+        with self.assertRaisesRegexp(AttributeError, msg):
+            result = plugin.process(self.cube, self.sites, self.ancillary_data)
+
+    def test_variable_no_neighbours(self):
+        """
+        Test that the plugin can handle a variable number of neighbours to use
+        when relaxing the 'nearest' condition. Make the smallest displacement
+        point 2 grid cells away, so it should be captured with no_neighbours
+        set to 25.
+
+        """
+        self.ancillary_data['orography'].data[13, 10] = 10.
+        plugin = PointSelection(method='minimum_height_error_neighbour',
+                                vertical_bias=None,
+                                land_constraint=False)
+        result = plugin.process(self.cube, self.sites, self.ancillary_data,
+                                no_neighbours=25)
+        self.assertEqual(result['i'], 13)
+        self.assertEqual(result['j'], 10)
+        self.assertEqual(result['dz'], 0.)
+
+    def test_invalid_no_neighbours(self):
+        """Test that an invalid choice of no_neighbours raises an error."""
+        plugin = PointSelection(method='minimum_height_error_neighbour',
+                                vertical_bias=None,
+                                land_constraint=False)
+        msg = 'Invalid nearest no'
+        with self.assertRaisesRegexp(ValueError, msg):
+            result = plugin.process(self.cube, self.sites, self.ancillary_data,
+                                    no_neighbours=20)
 
 
 class fast_nearest_neighbour(TestNeighbourFinding):
@@ -150,7 +188,8 @@ class min_dz_no_bias(TestNeighbourFinding):
 
     '''
 
-    method = 'min_dz_no_bias'
+    method = 'minimum_height_error_neighbour'
+    #min_dz_no_bias
 
     def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
@@ -181,7 +220,7 @@ def test_correct_neighbour_orography_equal_displacement(self):
         vertical displacement. No relative altitude bias in selection.
 
         In this case of equal minimum vertical grid point displacements above
-        and below the site the code will select the first occurence of this
+        and below the site the code will select the first occurrence of this
         smallest dz that is comes across; (14, 10) is tested before (16, 10).
         '''
         self.ancillary_data['orography'].data[14, 10] = 9.
@@ -211,22 +250,23 @@ class min_dz_biased_above(TestNeighbourFinding):
 
     '''
 
-    method = 'min_dz_biased_above'
+    method = 'minimum_height_error_neighbour'
+    # min_dz_biased_above
 
    def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
-        self.return_types(self.method)
+        self.return_types(self.method, vertical_bias='above')
 
     def test_without_ancillary_data(self):
         '''
         Ensure an exception is raised if needed ancillary fields are missing.
''' - self.without_ancillary_data(self.method) + self.without_ancillary_data(self.method, vertical_bias='above') def test_correct_neighbour_no_orography(self): '''Nearest neighbouring grid point with no other conditions''' - self.correct_neighbour(self.method, 15, 10, 10.) + self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='above') def test_correct_neighbour_orography(self): ''' @@ -235,7 +275,7 @@ def test_correct_neighbour_orography(self): altitudes above the site if these are available. ''' self.ancillary_data['orography'].data[14, 10] = 10. - self.correct_neighbour(self.method, 14, 10, 0.) + self.correct_neighbour(self.method, 14, 10, 0., vertical_bias='above') def test_correct_neighbour_orography_equal_displacement(self): ''' @@ -250,7 +290,7 @@ def test_correct_neighbour_orography_equal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 16, 10, -1.) + self.correct_neighbour(self.method, 16, 10, -1., vertical_bias='above') def test_correct_neighbour_orography_unequal_displacement(self): ''' @@ -265,7 +305,7 @@ def test_correct_neighbour_orography_unequal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 12. - self.correct_neighbour(self.method, 16, 10, -2.) + self.correct_neighbour(self.method, 16, 10, -2., vertical_bias='above') class min_dz_biased_below(TestNeighbourFinding): @@ -278,22 +318,23 @@ class min_dz_biased_below(TestNeighbourFinding): ''' - method = 'min_dz_biased_below' + method = 'minimum_height_error_neighbour' + #min_dz_biased_below' def test_return_type(self): '''Ensure a numpy array of the format expected is returned.''' - self.return_types(self.method) + self.return_types(self.method, vertical_bias='below') def test_without_ancillary_data(self): ''' Ensure an exception is raised if needed ancillary fields are missing. ''' - self.without_ancillary_data(self.method) + self.without_ancillary_data(self.method, vertical_bias='below') def test_correct_neighbour_no_orography(self): '''Nearest neighbouring grid point with no other conditions''' - self.correct_neighbour(self.method, 15, 10, 10.) + self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='below') def test_correct_neighbour_orography(self): ''' @@ -302,7 +343,7 @@ def test_correct_neighbour_orography(self): altitudes below the site if these are available. ''' self.ancillary_data['orography'].data[14, 10] = 10. - self.correct_neighbour(self.method, 14, 10, 0.) + self.correct_neighbour(self.method, 14, 10, 0., vertical_bias='below') def test_correct_neighbour_orography_equal_displacement(self): ''' @@ -317,7 +358,7 @@ def test_correct_neighbour_orography_equal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 14, 10, 1.) + self.correct_neighbour(self.method, 14, 10, 1., vertical_bias='below') def test_correct_neighbour_orography_unequal_displacement(self): ''' @@ -332,7 +373,7 @@ def test_correct_neighbour_orography_unequal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 8. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 14, 10, 2.) 
+ self.correct_neighbour(self.method, 14, 10, 2., vertical_bias='below') class min_dz_land_no_bias(TestNeighbourFinding): @@ -349,22 +390,23 @@ class min_dz_land_no_bias(TestNeighbourFinding): ''' - method = 'min_dz_land_no_bias' + method = 'minimum_height_error_neighbour' + # min_dz_land_no_bias' def test_return_type(self): '''Ensure a numpy array of the format expected is returned.''' - self.return_types(self.method) + self.return_types(self.method, land_constraint=True) def test_without_ancillary_data(self): ''' Ensure an exception is raised if needed ancillary fields are missing. ''' - self.without_ancillary_data(self.method) + self.without_ancillary_data(self.method, land_constraint=True) def test_correct_neighbour_no_orography(self): '''Nearest neighbouring grid point with no other conditions''' - self.correct_neighbour(self.method, 15, 10, 10.) + self.correct_neighbour(self.method, 15, 10, 10., land_constraint=True) def test_correct_neighbour_orography(self): ''' @@ -372,7 +414,7 @@ def test_correct_neighbour_orography(self): vertical displacement. No relative altitude bias in selection. ''' self.ancillary_data['orography'].data[14, 10] = 10. - self.correct_neighbour(self.method, 14, 10, 0.) + self.correct_neighbour(self.method, 14, 10, 0., land_constraint=True) def test_correct_neighbour_orography_equal_displacement(self): ''' @@ -380,12 +422,12 @@ def test_correct_neighbour_orography_equal_displacement(self): vertical displacement. No relative altitude bias in selection. In this case of equal minimum vertical grid point displacements above - and below the site the code will select the first occurence of this + and below the site the code will select the first occurrence of this smallest dz that is comes across; (14, 10) is tested before (16, 10). ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 14, 10, 1.) + self.correct_neighbour(self.method, 14, 10, 1., land_constraint=True) def test_correct_neighbour_orography_unequal_displacement(self): ''' @@ -397,7 +439,7 @@ def test_correct_neighbour_orography_unequal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 8. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 16, 10, -1.) + self.correct_neighbour(self.method, 16, 10, -1., land_constraint=True) def test_correct_neighbour_no_orography_land(self): ''' @@ -405,8 +447,8 @@ def test_correct_neighbour_no_orography_land(self): and leaves coordinates unchanged (dz should not vary over the sea). ''' - self.ancillary_data['land'].data[15, 10] = 0. - self.correct_neighbour(self.method, 15, 10, 10.) + self.ancillary_data['land_mask'].data[15, 10] = 0. + self.correct_neighbour(self.method, 15, 10, 10., land_constraint=True) def test_correct_neighbour_orography_equal_displacement_land(self): ''' @@ -420,8 +462,8 @@ def test_correct_neighbour_orography_equal_displacement_land(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. - self.ancillary_data['land'].data[14, 10] = 0. - self.correct_neighbour(self.method, 16, 10, -1.) + self.ancillary_data['land_mask'].data[14, 10] = 0. + self.correct_neighbour(self.method, 16, 10, -1., land_constraint=True) def test_correct_neighbour_orography_unequal_displacement_land(self): ''' @@ -435,8 +477,8 @@ def test_correct_neighbour_orography_unequal_displacement_land(self): ''' self.ancillary_data['orography'].data[14, 10] = 8. 
self.ancillary_data['orography'].data[16, 10] = 11. - self.ancillary_data['land'].data[16, 10] = 0. - self.correct_neighbour(self.method, 14, 10, 2.) + self.ancillary_data['land_mask'].data[16, 10] = 0. + self.correct_neighbour(self.method, 14, 10, 2., land_constraint=True) class min_dz_land_biased_above(TestNeighbourFinding): @@ -454,22 +496,26 @@ class min_dz_land_biased_above(TestNeighbourFinding): ''' - method = 'min_dz_land_biased_above' + method = 'minimum_height_error_neighbour' + # min_dz_land_biased_above' def test_return_type(self): '''Ensure a numpy array of the format expected is returned.''' - self.return_types(self.method) + self.return_types(self.method, vertical_bias='above', + land_constraint=True) def test_without_ancillary_data(self): ''' Ensure an exception is raised if needed ancillary fields are missing. ''' - self.without_ancillary_data(self.method) + self.without_ancillary_data(self.method, vertical_bias='above', + land_constraint=True) def test_correct_neighbour_no_orography(self): '''Nearest neighbouring grid point with no other conditions''' - self.correct_neighbour(self.method, 15, 10, 10.) + self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='above', + land_constraint=True) def test_correct_neighbour_orography(self): ''' @@ -478,7 +524,8 @@ def test_correct_neighbour_orography(self): altitudes above the site if these are available. ''' self.ancillary_data['orography'].data[14, 10] = 10. - self.correct_neighbour(self.method, 14, 10, 0.) + self.correct_neighbour(self.method, 14, 10, 0., vertical_bias='above', + land_constraint=True) def test_correct_neighbour_orography_equal_displacement(self): ''' @@ -493,7 +540,8 @@ def test_correct_neighbour_orography_equal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 16, 10, -1.) + self.correct_neighbour(self.method, 16, 10, -1., vertical_bias='above', + land_constraint=True) def test_correct_neighbour_orography_unequal_displacement(self): ''' @@ -508,7 +556,8 @@ def test_correct_neighbour_orography_unequal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 12. - self.correct_neighbour(self.method, 16, 10, -2.) + self.correct_neighbour(self.method, 16, 10, -2., vertical_bias='above', + land_constraint=True) def test_correct_neighbour_no_orography_land(self): ''' @@ -516,8 +565,9 @@ def test_correct_neighbour_no_orography_land(self): and leaves coordinates unchanged (dz should not vary over the sea). ''' - self.ancillary_data['land'].data[15, 10] = 0. - self.correct_neighbour(self.method, 15, 10, 10.) + self.ancillary_data['land_mask'].data[15, 10] = 0. + self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='above', + land_constraint=True) def test_correct_neighbour_orography_equal_displacement_land(self): ''' @@ -533,8 +583,9 @@ def test_correct_neighbour_orography_equal_displacement_land(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. - self.ancillary_data['land'].data[16, 10] = 0. - self.correct_neighbour(self.method, 14, 10, 1.) + self.ancillary_data['land_mask'].data[16, 10] = 0. 
+ self.correct_neighbour(self.method, 14, 10, 1., vertical_bias='above', + land_constraint=True) def test_correct_neighbour_orography_unequal_displacement_land(self): ''' @@ -551,8 +602,9 @@ def test_correct_neighbour_orography_unequal_displacement_land(self): ''' self.ancillary_data['orography'].data[14, 10] = 8. self.ancillary_data['orography'].data[16, 10] = 11. - self.ancillary_data['land'].data[16, 10] = 0. - self.correct_neighbour(self.method, 14, 10, 2.) + self.ancillary_data['land_mask'].data[16, 10] = 0. + self.correct_neighbour(self.method, 14, 10, 2., vertical_bias='above', + land_constraint=True) class min_dz_land_biased_below(TestNeighbourFinding): @@ -570,22 +622,26 @@ class min_dz_land_biased_below(TestNeighbourFinding): ''' - method = 'min_dz_land_biased_below' + method = 'minimum_height_error_neighbour' + # min_dz_land_biased_below' def test_return_type(self): '''Ensure a numpy array of the format expected is returned.''' - self.return_types(self.method) + self.return_types(self.method, vertical_bias='below', + land_constraint=True) def test_without_ancillary_data(self): ''' Ensure an exception is raised if needed ancillary fields are missing. ''' - self.without_ancillary_data(self.method) + self.without_ancillary_data(self.method, vertical_bias='below', + land_constraint=True) def test_correct_neighbour_no_orography(self): '''Nearest neighbouring grid point with no other conditions''' - self.correct_neighbour(self.method, 15, 10, 10.) + self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='below', + land_constraint=True) def test_correct_neighbour_orography(self): ''' @@ -594,7 +650,8 @@ def test_correct_neighbour_orography(self): altitudes below the site if these are available. ''' self.ancillary_data['orography'].data[14, 10] = 10. - self.correct_neighbour(self.method, 14, 10, 0.) + self.correct_neighbour(self.method, 14, 10, 0., vertical_bias='below', + land_constraint=True) def test_correct_neighbour_orography_equal_displacement(self): ''' @@ -609,7 +666,8 @@ def test_correct_neighbour_orography_equal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 14, 10, 1.) + self.correct_neighbour(self.method, 14, 10, 1., vertical_bias='below', + land_constraint=True) def test_correct_neighbour_orography_unequal_displacement(self): ''' @@ -624,7 +682,8 @@ def test_correct_neighbour_orography_unequal_displacement(self): ''' self.ancillary_data['orography'].data[14, 10] = 8. self.ancillary_data['orography'].data[16, 10] = 11. - self.correct_neighbour(self.method, 14, 10, 2.) + self.correct_neighbour(self.method, 14, 10, 2., vertical_bias='below', + land_constraint=True) def test_correct_neighbour_no_orography_land(self): ''' @@ -632,8 +691,9 @@ def test_correct_neighbour_no_orography_land(self): and leaves coordinates unchanged (dz should not vary over the sea). ''' - self.ancillary_data['land'].data[15, 10] = 0. - self.correct_neighbour(self.method, 15, 10, 10.) + self.ancillary_data['land_mask'].data[15, 10] = 0. + self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='below', + land_constraint=True) def test_correct_neighbour_orography_equal_displacement_land(self): ''' @@ -651,8 +711,9 @@ def test_correct_neighbour_orography_equal_displacement_land(self): self.ancillary_data['orography'].data + 20.) self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 11. 
- self.ancillary_data['land'].data[14, 10] = 0. - self.correct_neighbour(self.method, 16, 10, -1.) + self.ancillary_data['land_mask'].data[14, 10] = 0. + self.correct_neighbour(self.method, 16, 10, -1., vertical_bias='below', + land_constraint=True) def test_correct_neighbour_orography_unequal_displacement_land(self): ''' @@ -671,8 +732,9 @@ def test_correct_neighbour_orography_unequal_displacement_land(self): self.ancillary_data['orography'].data + 20.) self.ancillary_data['orography'].data[14, 10] = 9. self.ancillary_data['orography'].data[16, 10] = 12. - self.ancillary_data['land'].data[14, 10] = 0. - self.correct_neighbour(self.method, 16, 10, -2.) + self.ancillary_data['land_mask'].data[14, 10] = 0. + self.correct_neighbour(self.method, 16, 10, -2., vertical_bias='below', + land_constraint=True) if __name__ == '__main__': From 2c90ef4b61a32fe8aceb395295061f77a93be9df Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Thu, 1 Jun 2017 08:51:28 +0100 Subject: [PATCH 0066/1367] pep8 and pylint changes that I forgot. --- lib/improver/spotdata/common_functions.py | 8 ++++--- lib/improver/spotdata/neighbour_finding.py | 5 ++-- .../spotdata/tests/test_neighbour_finding.py | 23 ++++++++++--------- 3 files changed, 19 insertions(+), 17 deletions(-) diff --git a/lib/improver/spotdata/common_functions.py b/lib/improver/spotdata/common_functions.py index b41fbfbf71..9c03c7bdb4 100644 --- a/lib/improver/spotdata/common_functions.py +++ b/lib/improver/spotdata/common_functions.py @@ -37,7 +37,7 @@ import numpy as np from iris import Constraint from iris.time import PartialDateTime - +import cartopy.crs as ccrs class ConditionalListExtract(object): """ @@ -110,6 +110,7 @@ def not_equal_to(data, array_of_indices, comparison_value): array_of_indices[1]]] != comparison_value ) + def nearest_n_neighbours(i, j, no_neighbours, exclude_self=False): """ Returns a coordinate list of n points comprising the original @@ -398,9 +399,10 @@ def xy_transform(trg_crs, latitude, longitude): return trg_crs.transform_point(longitude, latitude, ccrs.PlateCarree()) -def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): + +def isclose(val1, val2, rel_tol=1e-09, abs_tol=0.0): """ Floating point comparison for nearly equal. """ - return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) + return abs(val1-val2) <= max(rel_tol * max(abs(val1), abs(val2)), abs_tol) diff --git a/lib/improver/spotdata/neighbour_finding.py b/lib/improver/spotdata/neighbour_finding.py index 4a53ffa123..bb95ab728c 100644 --- a/lib/improver/spotdata/neighbour_finding.py +++ b/lib/improver/spotdata/neighbour_finding.py @@ -32,7 +32,6 @@ """Neighbour finding for the Improver site specific process chain.""" import numpy as np -import cartopy.crs as ccrs from improver.spotdata.ancillaries import data_from_ancillary from improver.spotdata.common_functions import (ConditionalListExtract, nearest_n_neighbours, @@ -158,12 +157,12 @@ def fast_nearest_neighbour(cube, sites, orography=None): Args: ----- cube/sites : See process() above. - + orography : numpy.array Array of orography data extracted from an iris.cube.Cube that corresponds to the grids on which all other input diagnostics will be provided (iris.cube.Cube.data). - + Returns: -------- neighbours: See process() above. 
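The isclose helper tidied above is the standard symmetric relative-tolerance comparison (Python 3's math.isclose behaves the same way; a local copy is presumably needed while this code targets Python 2). Its role in the neighbour selection is to treat equal-magnitude vertical displacements as a tie, so the original nearest point is kept rather than a more distant one that merely matches it:

    def isclose(val1, val2, rel_tol=1e-09, abs_tol=0.0):
        """Floating point comparison for nearly equal (as patched above)."""
        return abs(val1-val2) <= max(rel_tol * max(abs(val1), abs(val2)),
                                     abs_tol)

    dz_min, dz_nearest = 1.0, -1.0
    # Equal magnitudes: treated as a tie, so the nearest neighbour is kept.
    print(isclose(dz_min, abs(dz_nearest)))  # True
    # A genuinely smaller displacement is not "close", so it would win.
    print(isclose(0.5, abs(dz_nearest)))     # False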
diff --git a/lib/improver/spotdata/tests/test_neighbour_finding.py b/lib/improver/spotdata/tests/test_neighbour_finding.py index 9828d24128..32d2156e28 100644 --- a/lib/improver/spotdata/tests/test_neighbour_finding.py +++ b/lib/improver/spotdata/tests/test_neighbour_finding.py @@ -72,12 +72,12 @@ def setUp(self): ancillary_data.update({'land_mask': land}) sites = OrderedDict() - sites.update({'100': {'latitude': 50, - 'longitude': 0, - 'altitude': 10, - 'gmtoffset': 0 - } - }) + sites['100'] = { + 'latitude': 50, + 'longitude': 0, + 'altitude': 10, + 'gmtoffset': 0 + } neighbour_list = np.empty(1, dtype=[('i', 'i8'), ('j', 'i8'), @@ -105,7 +105,8 @@ def correct_neighbour(self, method, i_expected, j_expected, dz_expected, self.assertEqual(result['j'], j_expected) self.assertEqual(result['dz'], dz_expected) - def without_ancillary_data(self, method, vertical_bias=None, land_constraint=False): + def without_ancillary_data(self, method, vertical_bias=None, + land_constraint=False): """Test plugins behaviour with no ancillary data provided""" plugin = PointSelection(method, vertical_bias, land_constraint) if method == 'fast_nearest_neighbour': @@ -121,7 +122,7 @@ class miscellaneous(TestNeighbourFinding): def test_invalid_method(self): """ Test that the plugin can handle an invalid method being passed in. - + """ plugin = PointSelection('smallest distance') msg = 'Unknown method' @@ -134,7 +135,7 @@ def test_variable_no_neighbours(self): when relaxing the 'nearest' condition. Make the smallest displacement point 2-grid cells away, so it should be captured with no_neighbours set to 25. - + """ self.ancillary_data['orography'].data[13, 10] = 10. plugin = PointSelection(method='minimum_height_error_neighbour', @@ -189,7 +190,7 @@ class min_dz_no_bias(TestNeighbourFinding): ''' method = 'minimum_height_error_neighbour' - #min_dz_no_bias + # min_dz_no_bias def test_return_type(self): '''Ensure a numpy array of the format expected is returned.''' @@ -319,7 +320,7 @@ class min_dz_biased_below(TestNeighbourFinding): ''' method = 'minimum_height_error_neighbour' - #min_dz_biased_below' + # min_dz_biased_below' def test_return_type(self): '''Ensure a numpy array of the format expected is returned.''' From 78b88d07347eb84a05c6f3d76ea8b90ba82da1cc Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Thu, 1 Jun 2017 09:09:24 +0100 Subject: [PATCH 0067/1367] Removed unused variable names in unit tests. 
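The test updates in these commits all exercise the consolidated plugin through constructor keywords, along the lines of PointSelection('minimum_height_error_neighbour', vertical_bias='above', land_constraint=True).process(cube, sites, ancillary_data), rather than the removed wrapper methods. The structured array those tests inspect can be sketched in isolation (values invented; the dtype mirrors the renamed 'edgepoint' field):

    import numpy as np

    neighbours = np.empty(2, dtype=[('i', 'i8'), ('j', 'i8'),
                                    ('dz', 'f8'), ('edgepoint', 'bool_')])
    neighbours[0] = (15, 10, 10., False)  # nearest point; site sits 10 m above
    neighbours[1] = (14, 10, 0., False)   # perfect altitude match
    print(neighbours['dz'])               # e.g. [ 10.   0.]
    print(neighbours['edgepoint'].any())  # False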
---
 lib/improver/spotdata/tests/test_neighbour_finding.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/lib/improver/spotdata/tests/test_neighbour_finding.py b/lib/improver/spotdata/tests/test_neighbour_finding.py
index 32d2156e28..2f89b8928b 100644
--- a/lib/improver/spotdata/tests/test_neighbour_finding.py
+++ b/lib/improver/spotdata/tests/test_neighbour_finding.py
@@ -127,7 +127,7 @@ def test_invalid_method(self):
         plugin = PointSelection('smallest distance')
         msg = 'Unknown method'
         with self.assertRaisesRegexp(AttributeError, msg):
-            result = plugin.process(self.cube, self.sites, self.ancillary_data)
+            plugin.process(self.cube, self.sites, self.ancillary_data)
 
     def test_variable_no_neighbours(self):
@@ -153,8 +153,8 @@ def test_invalid_no_neighbours(self):
             land_constraint=False)
         msg = 'Invalid nearest no'
         with self.assertRaisesRegexp(ValueError, msg):
-            result = plugin.process(self.cube, self.sites, self.ancillary_data,
-                                    no_neighbours=20)
+            plugin.process(self.cube, self.sites, self.ancillary_data,
+                           no_neighbours=20)
 
 
 class fast_nearest_neighbour(TestNeighbourFinding):

From 5151ee9c85c26114d25418980b75b7d5b539b2f5 Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Thu, 1 Jun 2017 09:29:55 +0100
Subject: [PATCH 0068/1367] Edits following review comments including addition
 of conversion of units of the time coordinates, when the desired lead times
 are accessed.

---
 bin/improver-nbhood | 9 +-
 lib/improver/nbhood.py | 50 +++++++----
 .../helper_functions_ensemble_calibration.py | 2 +-
 ...ApplyCoefficientsForEnsembleCalibration.py | 8 +-
 ...nsemble_calibration_EnsembleCalibration.py | 6 +-
 ...ensemble_calibration_EnsembleReordering.py | 8 +-
 ...imateCoefficientsForEnsembleCalibration.py | 6 +-
 ..._GeneratePercentilesFromMeanAndVariance.py | 10 +--
 ...est_nbhood_basicneighbourhoodprocessing.py | 87 ++++++++++++++-----
 9 files changed, 125 insertions(+), 61 deletions(-)

diff --git a/bin/improver-nbhood b/bin/improver-nbhood
index de9fe55cef..c0b72cca32 100755
--- a/bin/improver-nbhood
+++ b/bin/improver-nbhood
@@ -51,7 +51,10 @@ def main():
                         metavar='RADIUS_BY_LEAD_TIME', nargs=2,
                         help='The kernel radii for neighbourhood processing '
                         'and the associated lead times at which the radii are '
-                        'valid.')
+                        'valid. The radii are in km whilst the lead time '
+                        'has units of hours. '
+                        'The radii and lead times are expected as '
+                        'comma-separated lists e.g. 10,12,14')
     parser.add_argument('input_filepath', metavar='INPUT_FILE',
                         help='A path to an input NetCDF file to be processed')
     parser.add_argument('output_filepath', metavar='OUTPUT_FILE',
@@ -62,8 +65,8 @@
         radius_or_radii_in_km = args.radius_in_km
         lead_times = None
     elif args.radii_in_km_by_lead_time:
-        radius_or_radii_in_km = args.radii_in_km_by_lead_time[0]
-        lead_times = args.radius_in_km_by_lead_time[1]
+        radius_or_radii_in_km = args.radii_in_km_by_lead_time[0].split(",")
+        lead_times = args.radii_in_km_by_lead_time[1].split(",")
     result = (
         BasicNeighbourhoodProcessing(
             radius_or_radii_in_km,
diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py
index 52ba95b2b2..c0ec3843e5 100644
--- a/lib/improver/nbhood.py
+++ b/lib/improver/nbhood.py
@@ -30,11 +30,10 @@
 # POSSIBILITY OF SUCH DAMAGE.
"""Module containing neighbourhood processing utilities.""" -import numpy as np -import scipy.ndimage.filters - import iris from iris.exceptions import CoordinateNotFoundError +import numpy as np +import scipy.ndimage.filters from improver.ensemble_calibration.ensemble_calibration_utilities import ( concatenate_cubes) @@ -105,7 +104,11 @@ def _find_required_lead_times(cube): """ Determine the lead times within a cube, either by reading the forecast_period coordinate, or by calculating the difference between - the time and the forecast_reference_time. + the time and the forecast_reference_time. If the forecast_period + coordinate is present, the points are assumed to represent the + desired lead times with the bounds not being considered. The units of + the forecast_period, time and forecast_reference_time coordinates are + converted, if required. Parameters ---------- @@ -120,9 +123,22 @@ def _find_required_lead_times(cube): """ if cube.coords("forecast_period"): + try: + cube.coord("forecast_period").convert_units("hours") + except ValueError as err: + msg = "For forecast_period: {}".format(err) + raise ValueError(msg) required_lead_times = cube.coord("forecast_period").points else: if cube.coords("time") and cube.coords("forecast_reference_time"): + try: + cube.coord("time").convert_units( + "hours since 1970-01-01 00:00:00") + cube.coord("forecast_reference_time").convert_units( + "hours since 1970-01-01 00:00:00") + except ValueError as err: + msg = "For time/forecast_reference_time: {}".format(err) + raise ValueError(msg) required_lead_times = ( cube.coord("time").points - cube.coord("forecast_reference_time").points) @@ -135,7 +151,7 @@ def _find_required_lead_times(cube): raise CoordinateNotFoundError(msg) return required_lead_times - def _get_grid_x_y_kernel_ranges(self, cube, radii_in_km): + def _get_grid_x_y_kernel_ranges(self, cube, radius_in_km): """ Return the number of grid cells in the x and y direction to be used to create the kernel. @@ -145,8 +161,8 @@ def _get_grid_x_y_kernel_ranges(self, cube, radii_in_km): cube : Iris.cube.Cube Cube containing the x and y coordinates, which will be used for calculating the number of grid cells in the x and y direction, - which equates to the size of the desired radii. - radii_in_km : Float + which equates to the size of the desired radius. + radius_in_km : Float Radius in kilometres for use in specifying the number of grid cells used to create a kernel. 
@@ -169,25 +185,25 @@ def _get_grid_x_y_kernel_ranges(self, cube, radii_in_km): y_coord.convert_units("metres") d_north_metres = y_coord.points[1] - y_coord.points[0] d_east_metres = x_coord.points[1] - x_coord.points[0] - grid_cells_y = int(radii_in_km * 1000 / abs(d_north_metres)) - grid_cells_x = int(radii_in_km * 1000 / abs(d_east_metres)) + grid_cells_y = int(radius_in_km * 1000 / abs(d_north_metres)) + grid_cells_x = int(radius_in_km * 1000 / abs(d_east_metres)) if grid_cells_x == 0 or grid_cells_y == 0: raise ValueError( ("Neighbourhood processing radius of " + - "{0} km ".format(radii_in_km) + + "{0} km ".format(radius_in_km) + "gives zero cell extent") ) elif grid_cells_x < 0 or grid_cells_y < 0: raise ValueError( ("Neighbourhood processing radius of " + - "{0} km ".format(radii_in_km) + + "{0} km ".format(radius_in_km) + "gives a negative cell extent") ) if (grid_cells_x > self.MAX_KERNEL_CELL_RADIUS or grid_cells_y > self.MAX_KERNEL_CELL_RADIUS): raise ValueError( ("Neighbourhood processing radius of " + - "{0} km ".format(radii_in_km) + + "{0} km ".format(radius_in_km) + "exceeds maximum grid cell extent") ) return grid_cells_x, grid_cells_y @@ -229,10 +245,10 @@ def _apply_kernel_for_smoothing(self, cube, ranges): # contained within the desired radius. kernel = np.ones([int(1 + x * 2) for x in fullranges]) # Create an open multi-dimensional meshgrid. - meshgrid = np.ogrid[tuple([slice(-x, x+1) for x in ranges])] + open_grid = np.ogrid[tuple([slice(-x, x+1) for x in ranges])] if self.unweighted_mode: mask = np.reshape( - np.sum([x ** 2 for x in meshgrid]) > np.cumprod(ranges)[-1], + np.sum([x ** 2 for x in open_grid]) > np.cumprod(ranges)[-1], np.shape(kernel) ) else: @@ -240,7 +256,7 @@ def _apply_kernel_for_smoothing(self, cube, ranges): # highest weighting, with the weighting decreasing with distance # away from the central grid point. kernel[:] = ( - (np.cumprod(ranges)[-1] - np.sum([x**2. for x in meshgrid])) / + (np.cumprod(ranges)[-1] - np.sum([x**2. for x in open_grid])) / np.cumprod(ranges)[-1] ) mask = kernel < 0. @@ -289,10 +305,10 @@ def process(self, cube): cubes = iris.cube.CubeList([]) # Find the number of grid cells required for creating the kernel, # and then apply the kernel to smooth the field. 
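The kernel construction is easier to picture outside the plugin. Below is a standalone sketch of the unweighted circular footprint for ranges of two grid cells; the broadcast sum over the open grid is written out explicitly here for clarity, rather than via np.sum over a list as in the plugin:

    import numpy as np

    ranges = (2, 2)
    kernel = np.ones([int(1 + x * 2) for x in ranges])  # 5 x 5 array of ones
    open_grid = np.ogrid[tuple([slice(-x, x + 1) for x in ranges])]
    # Squared distance from the central grid point; anything beyond the
    # radius (here cumprod(ranges)[-1] = 4) is masked out of the kernel.
    distance_sq = open_grid[0] ** 2 + open_grid[1] ** 2
    kernel[distance_sq > np.cumprod(ranges)[-1]] = 0.
    print(kernel)
    # [[ 0.  0.  1.  0.  0.]
    #  [ 0.  1.  1.  1.  0.]
    #  [ 1.  1.  1.  1.  1.]
    #  [ 0.  1.  1.  1.  0.]
    #  [ 0.  0.  1.  0.  0.]]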
- for cube_slice, radii_in_km in ( + for cube_slice, radius_in_km in ( zip(cube.slices_over("time"), required_radii_in_km)): ranges = self._get_grid_x_y_kernel_ranges( - cube_slice, radii_in_km) + cube_slice, radius_in_km) cube_slice = ( self._apply_kernel_for_smoothing(cube_slice, ranges)) cube_slice = iris.util.new_axis(cube_slice, "time") diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index dadff94fb7..6e11f12787 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -80,7 +80,7 @@ def set_up_wind_speed_cube(): return set_up_cube(data, "wind_speed", "m s^-1") -def _add_forecast_reference_time_and_forecast_period( +def add_forecast_reference_time_and_forecast_period( cube, time_point=402295.0, fp_point=4.0): """ Function to add forecast_reference_time and forecast_period coordinates diff --git a/lib/improver/tests/test_ensemble_calibration_ApplyCoefficientsForEnsembleCalibration.py b/lib/improver/tests/test_ensemble_calibration_ApplyCoefficientsForEnsembleCalibration.py index b5a6c93f80..124e289c96 100644 --- a/lib/improver/tests/test_ensemble_calibration_ApplyCoefficientsForEnsembleCalibration.py +++ b/lib/improver/tests/test_ensemble_calibration_ApplyCoefficientsForEnsembleCalibration.py @@ -51,7 +51,7 @@ concatenate_cubes) from improver.tests.helper_functions_ensemble_calibration import( set_up_temperature_cube, - _add_forecast_reference_time_and_forecast_period) + add_forecast_reference_time_and_forecast_period) def datetime_from_timestamp(timestamp): @@ -94,7 +94,7 @@ def test_length_one_coords_list_of_coords(self): def test_check_all_coords(self): """Test that the plugin returns a DimCoord inside the list.""" current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) plugin = Plugin( current_temperature_forecast_cube, @@ -308,7 +308,7 @@ def setUp(self): self.cube = set_up_temperature_cube() self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.coeff_names = ["gamma", "delta", "a", "beta"] @@ -423,7 +423,7 @@ def setUp(self): self.cube = set_up_temperature_cube() self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.coeff_names = ["gamma", "delta", "a", "beta"] diff --git a/lib/improver/tests/test_ensemble_calibration_EnsembleCalibration.py b/lib/improver/tests/test_ensemble_calibration_EnsembleCalibration.py index 4ce30e5599..6507a3eebe 100644 --- a/lib/improver/tests/test_ensemble_calibration_EnsembleCalibration.py +++ b/lib/improver/tests/test_ensemble_calibration_EnsembleCalibration.py @@ -42,7 +42,7 @@ EnsembleCalibration as Plugin) from improver.tests.helper_functions_ensemble_calibration import( set_up_temperature_cube, set_up_wind_speed_cube, - _add_forecast_reference_time_and_forecast_period, + add_forecast_reference_time_and_forecast_period, _create_historic_forecasts, _create_truth) @@ -53,7 +53,7 @@ class Test_process(IrisTest): def setUp(self): """Set up temperature and wind speed cubes for testing.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + 
add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.historic_temperature_forecast_cube = ( @@ -63,7 +63,7 @@ def setUp(self): _create_truth(self.current_temperature_forecast_cube)) self.current_wind_speed_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_wind_speed_cube())) self.historic_wind_speed_forecast_cube = ( diff --git a/lib/improver/tests/test_ensemble_calibration_EnsembleReordering.py b/lib/improver/tests/test_ensemble_calibration_EnsembleReordering.py index 0439a90df5..422a4dd153 100644 --- a/lib/improver/tests/test_ensemble_calibration_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_calibration_EnsembleReordering.py @@ -43,7 +43,7 @@ EnsembleReordering as Plugin) from improver.tests.helper_functions_ensemble_calibration import( set_up_temperature_cube, - _add_forecast_reference_time_and_forecast_period) + add_forecast_reference_time_and_forecast_period) class Test_rank_ecc(IrisTest): @@ -56,7 +56,7 @@ def setUp(self): forecast_period coordinates. """ self.cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) def test_basic(self): @@ -302,10 +302,10 @@ def setUp(self): forecast_period coordinates. """ self.raw_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.calibrated_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) def test_basic(self): diff --git a/lib/improver/tests/test_ensemble_calibration_EstimateCoefficientsForEnsembleCalibration.py b/lib/improver/tests/test_ensemble_calibration_EstimateCoefficientsForEnsembleCalibration.py index 29fb7af993..82011f005e 100644 --- a/lib/improver/tests/test_ensemble_calibration_EstimateCoefficientsForEnsembleCalibration.py +++ b/lib/improver/tests/test_ensemble_calibration_EstimateCoefficientsForEnsembleCalibration.py @@ -46,7 +46,7 @@ EstimateCoefficientsForEnsembleCalibration as Plugin) from improver.tests.helper_functions_ensemble_calibration import( set_up_temperature_cube, set_up_wind_speed_cube, - _add_forecast_reference_time_and_forecast_period, + add_forecast_reference_time_and_forecast_period, _create_historic_forecasts, _create_truth) @@ -339,7 +339,7 @@ class Test_estimate_coefficients_for_ngr(IrisTest): def setUp(self): """Set up multiple cubes for testing.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.historic_temperature_forecast_cube = ( @@ -349,7 +349,7 @@ def setUp(self): _create_truth(self.current_temperature_forecast_cube)) self.current_wind_speed_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_wind_speed_cube())) self.historic_wind_speed_forecast_cube = ( diff --git a/lib/improver/tests/test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py index b43cb47c8b..ca054da0c1 100644 --- a/lib/improver/tests/test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py @@ -45,7 +45,7 @@ from 
improver.ensemble_calibration.ensemble_calibration import ( GeneratePercentilesFromMeanAndVariance as Plugin) from improver.tests.helper_functions_ensemble_calibration import( - set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period) + set_up_temperature_cube, add_forecast_reference_time_and_forecast_period) class Test__create_cube_with_percentiles(IrisTest): @@ -55,7 +55,7 @@ class Test__create_cube_with_percentiles(IrisTest): def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) def test_basic(self): @@ -124,7 +124,7 @@ class Test__mean_and_variance_to_percentiles(IrisTest): def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) def test_check_data(self): @@ -328,7 +328,7 @@ class Test_create_percentiles(IrisTest): def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) def test_basic(self): @@ -391,7 +391,7 @@ class Test_process(IrisTest): def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) def test_basic(self): diff --git a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py index 98bfec6c45..c8a1d83d3e 100644 --- a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py +++ b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py @@ -46,7 +46,7 @@ from improver.grids.osgb import OSGBGRID from improver.nbhood import BasicNeighbourhoodProcessing as NBHood from improver.tests.helper_functions_ensemble_calibration import ( - _add_forecast_reference_time_and_forecast_period) + add_forecast_reference_time_and_forecast_period) SINGLE_POINT_RANGE_3_CENTROID = np.array([ @@ -178,7 +178,7 @@ def test_radii_varying_with_lead_time_mismatch(self): iris.util.promote_aux_coord_to_dim_coord(cube, "time") time_points = cube.coord("time").points fp_points = [2, 3, 4] - cube = _add_forecast_reference_time_and_forecast_period( + cube = add_forecast_reference_time_and_forecast_period( cube, time_point=time_points, fp_point=fp_points) radii_in_km = [10, 20, 30] lead_times = [2, 3] @@ -195,7 +195,7 @@ class Test__find_required_lead_times(IrisTest): def test_basic(self): """Test that a list is returned.""" - cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) + cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) plugin = NBHood(self.RADIUS_IN_KM) result = plugin._find_required_lead_times(cube) self.assertIsInstance(result, np.ndarray) @@ -205,7 +205,7 @@ def test_check_coordinate(self): Test that the data within the list is as expected, when the input cube has a forecast_period coordinate. 
""" - cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) + cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) expected_result = cube.coord("forecast_period").points plugin = NBHood(self.RADIUS_IN_KM) result = plugin._find_required_lead_times(cube) @@ -217,7 +217,7 @@ def test_check_coordinate_without_forecast_period(self): the input cube has a time coordinate and a forecast_reference_time coordinate. """ - cube = _add_forecast_reference_time_and_forecast_period(set_up_cube()) + cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) cube.remove_coord("forecast_period") expected_result = ( cube.coord("time").points - @@ -226,6 +226,61 @@ def test_check_coordinate_without_forecast_period(self): result = plugin._find_required_lead_times(cube) self.assertArrayAlmostEqual(result, expected_result) + def test_check_forecast_period_unit_conversion(self): + """ + Test that the data within the list is as expected, when + the input cube has a forecast_period coordinate with units + other than the desired units of hours. + """ + cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) + expected_result = cube.coord("forecast_period").points + cube.coord("forecast_period").convert_units("seconds") + plugin = NBHood(self.RADIUS_IN_KM) + result = plugin._find_required_lead_times(cube) + self.assertArrayAlmostEqual(result, expected_result) + + def test_check_time_unit_conversion(self): + """ + Test that the data within the list is as expected, when + the input cube has a time coordinate with units + other than the desired units of hours since 1970-01-01 00:00:00. + """ + cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) + expected_result = cube.coord("forecast_period").points + cube.coord("time").convert_units("seconds since 1970-01-01 00:00:00") + plugin = NBHood(self.RADIUS_IN_KM) + result = plugin._find_required_lead_times(cube) + self.assertArrayAlmostEqual(result, expected_result) + + def test_check_forecast_period_unit_conversion_exception(self): + """ + Test that an exception is raised, when the input cube has a + forecast_period coordinate with units that can not be converted + into hours. + """ + cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) + expected_result = cube.coord("forecast_period").points + cube.coord("forecast_period").units = Unit("Celsius") + plugin = NBHood(self.RADIUS_IN_KM) + msg = "For forecast_period" + with self.assertRaisesRegexp(ValueError, msg): + plugin._find_required_lead_times(cube) + + def test_check_forecast_reference_time_unit_conversion_exception(self): + """ + Test that an exception is raised, when the input cube has a + forecast_reference_time coordinate with units that can not be + converted into hours. 
+ """ + cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) + cube.remove_coord("forecast_period") + expected_result = cube.coord("forecast_reference_time").points + cube.coord("forecast_reference_time").units = Unit("Celsius") + plugin = NBHood(self.RADIUS_IN_KM) + msg = "For time/forecast_reference_time" + with self.assertRaisesRegexp(ValueError, msg): + plugin._find_required_lead_times(cube) + def test_exception_raised(self): """ Test that a CoordinateNotFoundError exception is raised if the @@ -650,7 +705,7 @@ def test_radii_varying_with_lead_time(self): iris.util.promote_aux_coord_to_dim_coord(cube, "time") time_points = cube.coord("time").points fp_points = [2, 3, 4] - cube = _add_forecast_reference_time_and_forecast_period( + cube = add_forecast_reference_time_and_forecast_period( cube, time_point=time_points, fp_point=fp_points) radii_in_km = [10, 20, 30] lead_times = [2, 3, 4] @@ -672,12 +727,7 @@ def test_radii_varying_with_lead_time_check_data(self): [0.875, 0.83333333, 0.875], [0.91666667, 0.875, 0.91666667]) - expected[1, 5:10, 5:10] = ( - [0.992, 0.968, 0.96, 0.968, 0.992], - [0.968, 0.944, 0.936, 0.944, 0.968], - [0.96, 0.936, 0.928, 0.936, 0.96], - [0.968, 0.944, 0.936, 0.944, 0.968], - [0.992, 0.968, 0.96, 0.968, 0.992]) + expected[1, 5:10, 5:10] = SINGLE_POINT_RANGE_3_CENTROID expected[2, 4:11, 4:11] = ( [1, 0.9925, 0.985, 0.9825, 0.985, 0.9925, 1], @@ -691,7 +741,7 @@ def test_radii_varying_with_lead_time_check_data(self): iris.util.promote_aux_coord_to_dim_coord(cube, "time") time_points = cube.coord("time").points fp_points = [2, 3, 4] - cube = _add_forecast_reference_time_and_forecast_period( + cube = add_forecast_reference_time_and_forecast_period( cube, time_point=time_points, fp_point=fp_points) radii_in_km = [6, 8, 10] lead_times = [2, 3, 4] @@ -708,7 +758,7 @@ def test_radii_varying_with_lead_time_with_interpolation(self): iris.util.promote_aux_coord_to_dim_coord(cube, "time") time_points = cube.coord("time").points fp_points = [2, 3, 4] - cube = _add_forecast_reference_time_and_forecast_period( + cube = add_forecast_reference_time_and_forecast_period( cube, time_point=time_points, fp_point=fp_points) radii_in_km = [10, 30] lead_times = [2, 4] @@ -727,12 +777,7 @@ def test_radii_varying_with_lead_time_with_interpolation_check_data(self): [0.875, 0.83333333, 0.875], [0.91666667, 0.875, 0.91666667]) - expected[1, 5:10, 5:10] = ( - [0.992, 0.968, 0.96, 0.968, 0.992], - [0.968, 0.944, 0.936, 0.944, 0.968], - [0.96, 0.936, 0.928, 0.936, 0.96], - [0.968, 0.944, 0.936, 0.944, 0.968], - [0.992, 0.968, 0.96, 0.968, 0.992]) + expected[1, 5:10, 5:10] = SINGLE_POINT_RANGE_3_CENTROID expected[2, 4:11, 4:11] = ( [1, 0.9925, 0.985, 0.9825, 0.985, 0.9925, 1], @@ -746,7 +791,7 @@ def test_radii_varying_with_lead_time_with_interpolation_check_data(self): iris.util.promote_aux_coord_to_dim_coord(cube, "time") time_points = cube.coord("time").points fp_points = [2, 3, 4] - cube = _add_forecast_reference_time_and_forecast_period( + cube = add_forecast_reference_time_and_forecast_period( cube, time_point=time_points, fp_point=fp_points) radii_in_km = [6, 10] lead_times = [2, 4] From 807628febb540cbc2c1764c812d8fd3285b6bfb1 Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Thu, 1 Jun 2017 09:33:14 +0100 Subject: [PATCH 0069/1367] Changed to supplementary files. 
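
A note on the unit handling pinned down by the _find_required_lead_times tests above: the intended behaviour is that lead times come back in hours whatever units the input coordinates carry, and that inconvertible units raise an error. A minimal, hypothetical sketch of that behaviour (the function name is made up and this is not the plugin's actual implementation):

    def lead_times_in_hours(cube):
        """Hypothetical sketch: lead times in hours from forecast_period."""
        # Work on a copy so the caller's coordinate is left untouched.
        fp_coord = cube.coord("forecast_period").copy()
        try:
            # iris delegates to cf_units, which raises ValueError for
            # units (e.g. Celsius) that cannot be converted into hours.
            fp_coord.convert_units("hours")
        except ValueError as err:
            raise ValueError("For forecast_period: {}".format(err))
        return fp_coord.points

The tests convert the coordinate to seconds (or to Celsius for the failure cases) and check that the returned points still match the original hourly values.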
--- .gitignore | 3 --- lib/improver/spotdata/__init__.py | 37 ++++++++++++++++++++++++++----- 2 files changed, 31 insertions(+), 9 deletions(-) diff --git a/.gitignore b/.gitignore index c09bb4c49b..fc9ec3a569 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,3 @@ # Site-specific setup etc/site-init - -# Output data -*.nc diff --git a/lib/improver/spotdata/__init__.py b/lib/improver/spotdata/__init__.py index 631c0ab730..44cb91193d 100644 --- a/lib/improver/spotdata/__init__.py +++ b/lib/improver/spotdata/__init__.py @@ -1,8 +1,33 @@ -""" -Provides support routines. -""" +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +""" Provides support routines for SpotData. """ import os - -# Path to the ancillary data. -_root_dir = os.path.dirname(__file__) From f4541b0356dce65d02dc536d9dd42768118e4757 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 1 Jun 2017 09:41:25 +0100 Subject: [PATCH 0070/1367] Small docstring edits and correction to CLI test. --- bin/improver-nbhood | 4 ++-- lib/improver/nbhood.py | 3 ++- tests/improver-nbhood/01-help.bats | 3 +++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/bin/improver-nbhood b/bin/improver-nbhood index c0b72cca32..aea294081a 100755 --- a/bin/improver-nbhood +++ b/bin/improver-nbhood @@ -51,10 +51,10 @@ def main(): metavar='RADIUS_BY_LEAD_TIME', nargs=2, help='The kernel radii for neighbourhood processing ' 'and the associated lead times at which the radii are ' - 'valid. The radii are in km whilst the lead time' + 'valid. The radii are in km whilst the lead time ' 'has units of hours.' 'The radii and lead times are expected as ' - 'comma-separated lists e.g. 10,12,14') + 'comma-separated lists e.g. 
10,12,14.') parser.add_argument('input_filepath', metavar='INPUT_FILE', help='A path to an input NetCDF file to be processed') parser.add_argument('output_filepath', metavar='OUTPUT_FILE', diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py index c0ec3843e5..8067a4961f 100644 --- a/lib/improver/nbhood.py +++ b/lib/improver/nbhood.py @@ -73,7 +73,8 @@ def __init__(self, radii_in_km, lead_times=None, unweighted_mode=False): at the zero indices of the cube projection-x/y coords. lead_times : None or List List of lead times or forecast periods, at which the radii - within radii_in_km are defined. + within radii_in_km are defined. The lead times are expected + in hours. unweighted_mode : boolean If True, use a circle with constant weighting. If False, use a circle for neighbourhood kernel with diff --git a/tests/improver-nbhood/01-help.bats b/tests/improver-nbhood/01-help.bats index fc491781bd..620a99e2ef 100644 --- a/tests/improver-nbhood/01-help.bats +++ b/tests/improver-nbhood/01-help.bats @@ -23,6 +23,9 @@ optional arguments: --radii-in-km-by-lead-time RADIUS_BY_LEAD_TIME RADIUS_BY_LEAD_TIME The kernel radii for neighbourhood processing and the associated lead times at which the radii are valid. + The radii are in km whilst the lead time has units of + hours.The radii and lead times are expected as comma- + separated lists e.g. 10,12,14. __HELP__ [[ "$output" == "$expected" ]] } From 563f882c7325e1999344279e59824e48b3569518 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 1 Jun 2017 09:47:46 +0100 Subject: [PATCH 0071/1367] Small Codacy fix. --- lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py index c8a1d83d3e..ab7fbb8d0d 100644 --- a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py +++ b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py @@ -259,7 +259,6 @@ def test_check_forecast_period_unit_conversion_exception(self): into hours. """ cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) - expected_result = cube.coord("forecast_period").points cube.coord("forecast_period").units = Unit("Celsius") plugin = NBHood(self.RADIUS_IN_KM) msg = "For forecast_period" @@ -274,7 +273,6 @@ def test_check_forecast_reference_time_unit_conversion_exception(self): """ cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) cube.remove_coord("forecast_period") - expected_result = cube.coord("forecast_reference_time").points cube.coord("forecast_reference_time").units = Unit("Celsius") plugin = NBHood(self.RADIUS_IN_KM) msg = "For time/forecast_reference_time" From 76f9d0c31ca6368f7ea7dff32ef0e181284472d1 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 1 Jun 2017 09:51:45 +0100 Subject: [PATCH 0072/1367] Added copying of coordinate points in unit tests. --- .../tests/test_nbhood_basicneighbourhoodprocessing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py index ab7fbb8d0d..f8f5bebcbf 100644 --- a/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py +++ b/lib/improver/tests/test_nbhood_basicneighbourhoodprocessing.py @@ -233,7 +233,7 @@ def test_check_forecast_period_unit_conversion(self): other than the desired units of hours. 
""" cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) - expected_result = cube.coord("forecast_period").points + expected_result = cube.coord("forecast_period").points.copy() cube.coord("forecast_period").convert_units("seconds") plugin = NBHood(self.RADIUS_IN_KM) result = plugin._find_required_lead_times(cube) @@ -246,7 +246,7 @@ def test_check_time_unit_conversion(self): other than the desired units of hours since 1970-01-01 00:00:00. """ cube = add_forecast_reference_time_and_forecast_period(set_up_cube()) - expected_result = cube.coord("forecast_period").points + expected_result = cube.coord("forecast_period").points.copy() cube.coord("time").convert_units("seconds since 1970-01-01 00:00:00") plugin = NBHood(self.RADIUS_IN_KM) result = plugin._find_required_lead_times(cube) From 72479c95b6fd8598de1ce6502185b21c375ea63e Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Thu, 1 Jun 2017 10:32:19 +0100 Subject: [PATCH 0073/1367] Final change from BenF comments. --- lib/improver/spotdata/ancillaries.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/improver/spotdata/ancillaries.py b/lib/improver/spotdata/ancillaries.py index 09e48fc9a4..a54a17b3c1 100644 --- a/lib/improver/spotdata/ancillaries.py +++ b/lib/improver/spotdata/ancillaries.py @@ -116,5 +116,5 @@ def data_from_ancillary(ancillary_data, key): if key in ancillary_data.keys(): return ancillary_data[key].data - else: - raise Exception('Ancillary data {} has not been loaded.'.format(key)) + + raise Exception('Ancillary data {} has not been loaded.'.format(key)) From 660d1698ed2672e7b5f1598c4afac26ff737d6b7 Mon Sep 17 00:00:00 2001 From: Caroline Jones Date: Fri, 2 Jun 2017 10:13:31 +0100 Subject: [PATCH 0074/1367] Adding changes to the blending weights function as identified by the reviewers --- ...test_weights_ChooseDefaultWeightsLinear.py | 39 +++++----- ...t_weights_ChooseDefaultWeightsNonLinear.py | 33 +++++---- lib/improver/weights.py | 73 +++++++++++-------- 3 files changed, 83 insertions(+), 62 deletions(-) diff --git a/lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py b/lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py index 9db67e679d..808b7d02f6 100644 --- a/lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py +++ b/lib/improver/tests/test_weights_ChooseDefaultWeightsLinear.py @@ -44,12 +44,15 @@ def add_realizations(cube, num): - """ Create num realizations of input cube + """ Create num realizations of input cube. Args: - cube =iris.cube.Cube - input cube - num = integer - Number of realizations - Returns - cubeout = iris.cube.Cube - copy of cube with num realizations added + cube : iris.cube.Cube + Input cube. + num : integer + Number of realizations. + Returns: + cubeout : iris.cube.Cube + Copy of cube with num realizations added. """ cubelist = iris.cube.CubeList() for i in range(0, num): @@ -63,7 +66,7 @@ def add_realizations(cube, num): class TestChooseDefaultWeightsLinear(IrisTest): - """ Test the Default Linear Weights plugin """ + """Test the Default Linear Weights plugin. """ def setUp(self): data = np.zeros((2, 2, 2)) @@ -87,21 +90,21 @@ def setUp(self): self.cube = cube def test_basic(self): - """ Test that the plugin retuns an array of weights """ + """Test that the plugin returns an array of weights. 
""" coord = "time" plugin = LinearWeights() result = plugin.process(self.cube, coord) self.assertIsInstance(result, np.ndarray) def test_array_sum_equals_one(self): - """ Test that the resulting weights add up to one """ + """Test that the resulting weights add up to one. """ coord = "time" plugin = LinearWeights() result = plugin.process(self.cube, coord) self.assertAlmostEquals(result.sum(), 1.0) def test_fails_coord_not_in_cube(self): - """Test it raises a Value Error if coord not in the cube.""" + """Test it raises a Value Error if coord not in the cube. """ coord = "notset" plugin = LinearWeights() msg = ('The coord for this plugin must be ' @@ -110,7 +113,7 @@ def test_fails_coord_not_in_cube(self): plugin.process(self.cube, coord) def test_fails_input_not_a_cube(self): - """Test it raises a Value Error if not supplied with a cube.""" + """Test it raises a Value Error if not supplied with a cube. """ coord = "time" plugin = LinearWeights() notacube = 0.0 @@ -120,7 +123,7 @@ def test_fails_input_not_a_cube(self): plugin.process(notacube, coord) def test_fails_y0val_lessthan_zero(self): - """ Test it raises a Value Error if y0val less than zero """ + """Test it raises a Value Error if y0val less than zero. """ coord = "time" plugin = LinearWeights(y0val=-10.0) msg = ('y0val must be a float > 0.0') @@ -128,7 +131,7 @@ def test_fails_y0val_lessthan_zero(self): plugin.process(self.cube, coord) def test_fails_ynval_and_slope_set(self): - """ Test it raises a Value Error if slope and ynval set """ + """Test it raises a Value Error if slope and ynval set. """ coord = "time" plugin = LinearWeights(y0val=10.0, slope=-5.0, ynval=5.0) msg = ('Relative end point weight or slope must be set' @@ -137,7 +140,7 @@ def test_fails_ynval_and_slope_set(self): plugin.process(self.cube, coord) def test_fails_weights_negative(self): - """ Test it raises a Value Error if weights become negative """ + """Test it raises a Value Error if weights become negative. """ coord = "realization" plugin = LinearWeights(y0val=10.0, slope=-5.0) cubenew = add_realizations(self.cube, 6) @@ -146,14 +149,14 @@ def test_fails_weights_negative(self): plugin.process(cubenew, coord) def test_works_scalar_coord(self): - """Test it works if scalar coordinate.""" + """Test it works if scalar coordinate. """ coord = 'scalar_coord' plugin = LinearWeights() result = plugin.process(self.cube, coord) self.assertArrayAlmostEqual(result, np.array([1.0])) def test_works_defaults_used(self): - """Test it works if scalar coordinate.""" + """Test it works if defaults used. """ coord = "time" plugin = LinearWeights() result = plugin.process(self.cube, coord) @@ -161,7 +164,7 @@ def test_works_defaults_used(self): self.assertArrayAlmostEqual(result, expected_result) def test_works_y0val_and_slope_set(self): - """Test it works if y0val and slope_set.""" + """Test it works if y0val and slope_set. """ coord = "time" plugin = LinearWeights(y0val=10.0, slope=-5.0) result = plugin.process(self.cube, coord) @@ -169,7 +172,7 @@ def test_works_y0val_and_slope_set(self): self.assertArrayAlmostEqual(result, expected_result) def test_works_y0val_and_ynval_set(self): - """Test it works if scalar coordinate.""" + """Test it works if y0val and ynval set. 
""" coord = "time" plugin = LinearWeights(y0val=10.0, ynval=5.0) result = plugin.process(self.cube, coord) @@ -177,7 +180,7 @@ def test_works_y0val_and_ynval_set(self): self.assertArrayAlmostEqual(result, expected_result) def test_works_with_larger_num(self): - """Test it works with larger num_of_vals""" + """Test it works with larger num_of_vals. """ coord = "realization" plugin = LinearWeights(y0val=10.0, ynval=5.0) cubenew = add_realizations(self.cube, 6) diff --git a/lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py b/lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py index 59c36ba9ab..937b1eceb9 100644 --- a/lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py +++ b/lib/improver/tests/test_weights_ChooseDefaultWeightsNonLinear.py @@ -44,12 +44,15 @@ def add_realizations(cube, num): - """ Create num realizations of input cube + """Create num realizations of input cube. Args: - cube =iris.cube.Cube - input cube - num = integer - Number of realizations - Returns - cubeout = iris.cube.Cube - copy of cube with num realizations added + cube : iris.cube.Cube + input cube. + num : integer + Number of realizations. + Returns: + cubeout : iris.cube.Cube + copy of cube with num realizations added. """ cubelist = iris.cube.CubeList() for i in range(0, num): @@ -63,7 +66,7 @@ def add_realizations(cube, num): class TestChooseDefaultWeightsNonLinear(IrisTest): - """ Test the Default non-Linear Weights plugin """ + """Test the Default non-Linear Weights plugin. """ def setUp(self): data = np.zeros((2, 2, 2)) @@ -87,21 +90,21 @@ def setUp(self): self.cube = cube def test_basic(self): - """ Test that the plugin retuns an array of weights """ + """Test that the plugin returns an array of weights. """ coord = "time" plugin = NonLinearWeights() result = plugin.process(self.cube, coord) self.assertIsInstance(result, np.ndarray) def test_array_sum_equals_one(self): - """ Test that the resulting weights add up to one """ + """Test that the resulting weights add up to one. """ coord = "time" plugin = NonLinearWeights() result = plugin.process(self.cube, coord) self.assertAlmostEquals(result.sum(), 1.0) def test_fails_coord_not_in_cube(self): - """Test it raises a Value Error if coord not in the cube.""" + """Test it raises a Value Error if coord not in the cube. """ coord = "notset" plugin = NonLinearWeights() msg = ('The coord for this plugin must be ' @@ -110,7 +113,7 @@ def test_fails_coord_not_in_cube(self): plugin.process(self.cube, coord) def test_fails_input_not_a_cube(self): - """Test it raises a Value Error if not supplied with a cube.""" + """Test it raises a Value Error if not supplied with a cube. """ coord = "time" plugin = NonLinearWeights() notacube = 0.0 @@ -127,7 +130,7 @@ def test_fails_if_cval_not_valid(self): coord = "time" plugin = NonLinearWeights(cval=-1.0) msg = ('cval must be greater than 0.0 and less ' - 'than or equal to 1.0 ') + 'than or equal to 1.0') with self.assertRaisesRegexp(ValueError, msg): plugin.process(self.cube, coord) plugin2 = NonLinearWeights(cval=1.1) @@ -135,14 +138,14 @@ def test_fails_if_cval_not_valid(self): plugin2.process(self.cube, coord) def test_works_if_scalar_coord(self): - """Test it works if scalar coordinate.""" + """Test it works if scalar coordinate. 
""" coord = "scalar_coord" plugin = NonLinearWeights() result = plugin.process(self.cube, coord) self.assertArrayAlmostEqual(result, np.array([1.0])) def test_works_with_default_cval(self): - """Test it works with default cval.""" + """Test it works with default cval. """ coord = "time" plugin = NonLinearWeights() result = plugin.process(self.cube, coord) @@ -150,7 +153,7 @@ def test_works_with_default_cval(self): self.assertArrayAlmostEqual(result, expected_result) def test_works_with_cval_equal_one(self): - """Test it works with cval = 1.0, i.e. equal weights.""" + """Test it works with cval = 1.0, i.e. equal weights. """ coord = "time" plugin = NonLinearWeights(cval=1.0) result = plugin.process(self.cube, coord) @@ -158,7 +161,7 @@ def test_works_with_cval_equal_one(self): self.assertArrayAlmostEqual(result, expected_result) def test_works_with_larger_num(self): - """Test it works with larger num_of_vals""" + """Test it works with larger num_of_vals. """ coord = "realization" plugin = NonLinearWeights(cval=0.5) cubenew = add_realizations(self.cube, 6) diff --git a/lib/improver/weights.py b/lib/improver/weights.py index d1a51298be..59b493684d 100644 --- a/lib/improver/weights.py +++ b/lib/improver/weights.py @@ -36,14 +36,14 @@ def normalise_weights(weights): - """Ensures all weights add up to one + """Ensures all weights add up to one. Args: weights : array of weights. Returns: - normalised_weights : array of weights where - sum = 1.0. + normalised_weights : array of weights + where sum = 1.0 """ if weights.min() < 0.0: msg = 'Weights must be positive, at least one value < 0.0' @@ -59,12 +59,14 @@ def normalise_weights(weights): def nonlinear_weights(num_of_weights, cval): - """Create nonlinear weights + """Create nonlinear weights. Args: - num_of_weights : Positive Integer - Number of weights to create. + num_of_weights : Positive Integer + Number of weights to create. - cval : Float - greater than 0.0 but less than or equal to 1,0, + cval : Float + greater than 0.0 but less than or equal to 1,0, to be used for the nonlinear weights function. 1.0 = equal weights for all. @@ -100,18 +102,22 @@ def linear_weights(num_of_weights, y0val=1.0, slope=0.0, """Create linear weights Args: - num_of_weights : Positive Integer: Number of weights to create. - y0val = positive float: + num_of_weights : Positive Integer + Number of weights to create. + y0val : Positive float relative value of starting point. Default = 1.0 AND EITHER: - slope = float: slope of the line. Default = 0.0 (equal weights) + slope : float + slope of the line. Default = 0.0 (equal weights) OR - ynval = float or None: relative weights of last point. - Default value is None + ynval : Positive float or None + Relative weights of last point. + Default value is None Returns: - weights : array of weights, sum of all weights = 1.0 + weights : array of weights + sum of all weights = 1.0 """ if not isinstance(num_of_weights, int) or num_of_weights <= 0: @@ -148,16 +154,21 @@ class ChooseDefaultWeightsLinear(object): def __init__(self, y0val=None, slope=0.0, ynval=None): """Set up for calculating default weights using linear function - y0val = None or positive float: relative value of starting point. - slope = float: slope of the line. Default = 0.0 (equal weights) - ynval = float or None: relative weights of last point. - Default value is None - slope OR ynval should be set but NOT BOTH + Args: + y0val : None or positive float + Relative value of starting point. + slope : float + Slope of the line. Default = 0.0 (equal weights). 
+               ynval : float or None
+                   Relative weights of last point.
+                   Default value is None
+
+           slope OR ynval should be set but NOT BOTH.

-           If y0val value is not set or set to None
-           then the code assumes that the ultimate default values of
-           y0val = 20.0 and ynval = 2.0 are required
+           If y0val value is not set or set to None then the code
+           assumes that the ultimate default values of
+           y0val = 20.0 and ynval = 2.0 are required.

            equal weights when slope = 0.0 or y0val = ynval
        """
@@ -170,7 +181,7 @@ def __init__(self, y0val=None, slope=0.0, ynval=None):
             self.y0val = y0val

     def process(self, cube, coord):
-        """Calculated weights for a given cube and coord
+        """Calculate weights for a given cube and coord.

         Args:
             cube : iris.cube.Cube
@@ -188,7 +199,7 @@ def process(self, cube, coord):

         if not cube.coords(coord):
             msg = ('The coord for this plugin must be '
-                   'an existing coordinate in the input cube')
+                   'an existing coordinate in the input cube.')
             raise ValueError(msg)
         num_of_weights = len(cube.coord(coord).points)

@@ -201,7 +212,7 @@ def process(self, cube, coord):

     def __repr__(self):
         """Represent the configured plugin instance as a string."""
-        desc = '<ChooseDefaultWeightsNonLinear cval={}>'.format(self.cval)
+        desc = ('<ChooseDefaultWeightsNonLinear cval={}>'.format(self.cval))
         return desc

From 0856af9ac04213f2915fdae4c63053db844fff9f Mon Sep 17 00:00:00 2001
From: "benjamin.ayliffe"
Date: Fri, 2 Jun 2017 10:24:43 +0100
Subject: [PATCH 0075/1367] Addressing the rest of Gavin's comments.

---
 lib/improver/spotdata/__init__.py          |   2 -
 lib/improver/spotdata/ancillaries.py       |  37 +---
 lib/improver/spotdata/common_functions.py  | 123 +++++++++---
 lib/improver/spotdata/neighbour_finding.py |  30 +--
 lib/improver/spotdata/read_input.py        | 187 ++++++++++++++++++
 .../spotdata/tests/test_neighbour_finding.py | 127 ++++++------
 6 files changed, 374 insertions(+), 132 deletions(-)
 create mode 100644 lib/improver/spotdata/read_input.py

diff --git a/lib/improver/spotdata/__init__.py b/lib/improver/spotdata/__init__.py
index 44cb91193d..11486c3cb3 100644
--- a/lib/improver/spotdata/__init__.py
+++ b/lib/improver/spotdata/__init__.py
@@ -29,5 +29,3 @@
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
 """ Provides support routines for SpotData. """
-
-import os
diff --git a/lib/improver/spotdata/ancillaries.py b/lib/improver/spotdata/ancillaries.py
index a54a17b3c1..520ccff6f6 100644
--- a/lib/improver/spotdata/ancillaries.py
+++ b/lib/improver/spotdata/ancillaries.py
@@ -51,6 +51,9 @@ def get_ancillary_data(diagnostics, ancillary_path):
         options for how they should be produced, e.g. method of neighbour
         selection, method of data extraction etc.

+    ancillary_path : string
+        String giving the path of ancillary files to be used.
+
     Returns:
     --------
     ancillary_data : dict
@@ -84,37 +87,3 @@ def get_ancillary_data(diagnostics, ancillary_path):
         ancillary_data['land_mask'] = land

     return ancillary_data
-
-
-# Function that checks the presence of ancillary data when it is used and
-# raises an exception if it is missing.
-
-def data_from_ancillary(ancillary_data, key):
-    """
-    Check for an iris.cube.Cube of information in the ancillary data
-    dictionary.
-
-    Args:
-    -----
-    ancillary_data : dict
-        Dictionary defined by get_ancillary_data function that contains
-        iris.cube.Cube ancillary data.
-
-    key : string
-        Name of ancillary field requested.
-
-    Returns:
-    --------
-    data : numpy.array
-        Ancillary data array extracted from iris.cube.Cube.
-
-    Raises:
-    -------
-    Exception if the cube has not been loaded.
-
-    """
-
-    if key in ancillary_data.keys():
-        return ancillary_data[key].data
-
-    raise Exception('Ancillary data {} has not been loaded.'.format(key))
diff --git a/lib/improver/spotdata/common_functions.py b/lib/improver/spotdata/common_functions.py
index 9c03c7bdb4..d07083476b 100644
--- a/lib/improver/spotdata/common_functions.py
+++ b/lib/improver/spotdata/common_functions.py
@@ -34,44 +34,56 @@

 """

+import warnings
 import numpy as np
 from iris import Constraint
 from iris.time import PartialDateTime
 import cartopy.crs as ccrs

+
 class ConditionalListExtract(object):
-    """
+    '''
     Performs a numerical comparison, the type selected with method, of data
     in an array and returns an array of indices in that data array that
     fulfill the comparison.

-    Args:
-    -----
-    method : string
-        Which comparison to make, e.g. not_equal_to.
+    '''

-    data : numpy.array
-        Array of values to be filtered.
+    def __init__(self, method):
+        """
+        Get selected method of comparison.

-    indices_list : list
-        Indices in the data array that should be considered.
+        Args:
+        -----
+        method : string
+            Which comparison to make, e.g. not_equal_to.

-    comparison_value: float
-        Value against which numbers in data are to be compared.
+        """
+        self.method = method

-    Returns:
-    --------
-    array_of_indices.tolist(): list
-    A list of the the indices of data values that fulfill the comparison
-    condition.
+    def process(self, data, indices_list, comparison_value):
+        """
+        Call the data comparison method passed in.

-    """
+        Args:
+        -----
+        data : numpy.array
+            Array of values to be filtered.

-    def __init__(self, method):
-        self.method = method
+        indices_list : list
+            Indices in the data array that should be considered.
+
+        comparison_value: float
+            Value against which numbers in data are to be compared.
+
+        Returns:
+        --------
+        array_of_indices.tolist(): list
+            A list of the indices of data values that fulfill the
+            comparison condition.
+
+        """

-    def process(self, data, indices_list, comparison_value):
-        """ Call the data comparison method passed in"""
         array_of_indices = np.array(indices_list)
         function = getattr(self, self.method)
         subset = function(data, array_of_indices, comparison_value)
@@ -302,7 +314,7 @@ def construct_neighbour_hash(neighbour_finding):

     Returns:
     --------
-    neighbour_hash : string
+    A concatenated string of the options e.g.
     'fast_nearest_neighbour-None-False'

@@ -325,7 +337,7 @@ def apply_bias(vertical_bias, dzs):
         relative to the site; above/below/None.

     dzs : numpy.array
-        Array of vertical displacments calculated as the subtraction of grid
+        Array of vertical displacements calculated as the subtraction of grid
         orography altitudes from spot site altitudes.

     Returns:
@@ -406,3 +418,68 @@ def isclose(val1, val2, rel_tol=1e-09, abs_tol=0.0):

     """
     return abs(val1-val2) <= max(rel_tol * max(abs(val1), abs(val2)), abs_tol)
+
+
+def extract_cube_at_time(cubes, time, time_extract):
+    """
+    Extract a single cube at a given time from a cubelist.
+
+    Args:
+    -----
+    cubes : iris.cube.CubeList
+        CubeList of a given diagnostic over several times.
+
+    time : datetime.datetime object
+        Time at which forecast data is needed.
+
+    time_extract : iris.Constraint
+        Iris constraint for the desired time.
+
+    Returns:
+    --------
+    cube : iris.cube.Cube
+        Cube of data at the desired time.
+
+    Raises:
+    -------
+    ValueError if the desired time is not available within the cubelist.
+ + """ + try: + cube_in, = cubes.extract(time_extract) + return cube_in + except ValueError: + msg = ('Forecast time {} not found within data cubes.'.format( + time.strftime("%Y-%m-%d:%H:%M"))) + warnings.warn(msg) + return None + + +def extract_ad_at_time(additional_diagnostics, time, time_extract): + """ + Extracts additional diagnostics at the required time. + + Args: + ----- + additional_diagnostics : dict + Dictionary of additional time varying diagnostics needed + for the extraction method in use. + + time : datetime.datetime object + Time at which forecast data is needed. + + time_extract : iris.Constraint + Iris constraint for the desired time. + + Returns: + -------- + ad_extracted : dict + Dictionary of the additional diagnostics but only data + at the desired time. + + """ + ad_extracted = {} + for key in additional_diagnostics.keys(): + cubes = additional_diagnostics[key] + ad_extracted[key] = extract_cube_at_time(cubes, time, time_extract) + return ad_extracted diff --git a/lib/improver/spotdata/neighbour_finding.py b/lib/improver/spotdata/neighbour_finding.py index bb95ab728c..b4520a766e 100644 --- a/lib/improver/spotdata/neighbour_finding.py +++ b/lib/improver/spotdata/neighbour_finding.py @@ -32,14 +32,11 @@ """Neighbour finding for the Improver site specific process chain.""" import numpy as np -from improver.spotdata.ancillaries import data_from_ancillary -from improver.spotdata.common_functions import (ConditionalListExtract, - nearest_n_neighbours, - get_nearest_coords, - index_of_minimum_difference, - list_entry_from_index, - node_edge_test, apply_bias, - xy_test, xy_transform, isclose) +from improver.spotdata.read_input import data_from_dictionary +from improver.spotdata.common_functions import ( + ConditionalListExtract, nearest_n_neighbours, get_nearest_coords, + index_of_minimum_difference, list_entry_from_index, node_edge_test, + apply_bias, xy_test, xy_transform, isclose) class PointSelection(object): @@ -109,6 +106,7 @@ def process(self, cube, sites, ancillary_data, default_neighbours/no_neighbours : see minimum_height_error_neighbour() below. + Returns: -------- neighbours : numpy.dtype (fields: i, j, dz, edgepoint) @@ -121,17 +119,19 @@ def process(self, cube, sites, ancillary_data, """ if self.method == 'fast_nearest_neighbour': if 'orography' in ancillary_data.keys(): - orography = data_from_ancillary(ancillary_data, 'orography') + orography = data_from_dictionary( + ancillary_data, 'orography').data else: orography = None return self.fast_nearest_neighbour(cube, sites, orography=orography) elif self.method == 'minimum_height_error_neighbour': - orography = data_from_ancillary(ancillary_data, 'orography') + orography = data_from_dictionary(ancillary_data, 'orography').data land_mask = None if self.land_constraint: - land_mask = data_from_ancillary(ancillary_data, 'land_mask') + land_mask = data_from_dictionary( + ancillary_data, 'land_mask').data return self.minimum_height_error_neighbour( cube, sites, orography, land_mask=land_mask, @@ -279,11 +279,11 @@ def minimum_height_error_neighbour(self, cube, sites, orography, # one neighbouring point is also land. If not no modification # is made to the nearest neighbour coordinates. 
- exclude_self = nearest_n_neighbours(i, j, no_neighbours, - exclude_self=True) + neighbour_nodes = nearest_n_neighbours(i, j, no_neighbours, + exclude_self=True) if edgepoint: - exclude_self = node_edge_test(exclude_self, cube) - if not land_mask[i, j] or not any(land_mask[exclude_self]): + neighbour_nodes = node_edge_test(neighbour_nodes, cube) + if not land_mask[i, j] or not any(land_mask[neighbour_nodes]): continue # Filter the node_list to keep only land points diff --git a/lib/improver/spotdata/read_input.py b/lib/improver/spotdata/read_input.py new file mode 100644 index 0000000000..be9ac31d57 --- /dev/null +++ b/lib/improver/spotdata/read_input.py @@ -0,0 +1,187 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +""" +Plugins written for the Improver site specific process chain. +For reading data files from UM output and site specification input. + +""" + +from iris import load_cube, load +from iris import FUTURE +from iris.cube import CubeList + +FUTURE.netcdf_promote = True + + +class Load(object): + + """Plugin for loading data.""" + + def __init__(self, method): + """ + Simple function that currently takes a filename and loads a netCDF + file. + + Args: + ----- + method : string + A string representing the method of loading, be it a 'single_file' + that is loaded as an iris.cube.Cube, or 'multi_file' that causes + an iris.cube.CubeList to be returned containing all the cubes. + + """ + self.method = method + + def process(self, filepath, diagnostic): + """ + Simple wrapper for using iris load on a supplied netCDF file. + + Args: + ----- + filepath : string + Path to the input data files. + + diagnostic : string + The name of the desired diagnostic to be loaded. + + Returns + ------- + An iris.cube.Cube containing the data from the netCDF file. 
+
+        """
+        function = getattr(self, self.method)
+        return function(filepath, diagnostic)
+
+    @staticmethod
+    def single_file(filepath, diagnostic):
+        """ Load and return a single iris.cube.Cube """
+        return load_cube(filepath, diagnostic)
+
+    @staticmethod
+    def multi_file(filepath, diagnostic):
+        """ Load multiple cubes and return an iris.cube.CubeList """
+        return load(filepath, diagnostic)
+
+
+def get_method_prerequisites(method, diagnostic_data_path):
+    """
+    Determine which additional diagnostics are required for a given
+    method of data extraction.
+
+    Args:
+    -----
+    method : string
+        String representing the method of data extraction that is being used.
+
+    Returns:
+    --------
+    additional_diagnostics: dict
+        A dictionary keyed with the diagnostic names and containing the
+        additional cubes that are required.
+
+    """
+    if method == 'model_level_temperature_lapse_rate':
+        diagnostics = [
+            'temperature_on_height_levels',
+            'pressure_on_height_levels',
+            'surface_pressure']
+    else:
+        return None
+
+    additional_diagnostics = {}
+    for item in diagnostics:
+        additional_diagnostics[item] = get_additional_diagnostics(
+            item, diagnostic_data_path)
+
+    return additional_diagnostics
+
+
+def get_additional_diagnostics(diagnostic_name, diagnostic_data_path,
+                               time_extract=None):
+    """
+    Load additional diagnostics needed for particular spot data processes.
+
+    Args
+    ----
+    diagnostic_name : The name of the diagnostic to be loaded. Used to find
+                      the relevant file.
+
+    time_extract    : An iris constraint to extract and return only data from
+                      the desired time.
+
+    Returns
+    -------
+    cube            : An iris.cube.CubeList containing the desired diagnostic
+                      data, with a single entry if time_extract is provided.
+
+    """
+    with FUTURE.context(cell_datetime_objects=True):
+        cubes = Load('multi_file').process(
+            diagnostic_data_path + '/*/*' + diagnostic_name + '*',
+            None)
+        if time_extract is not None:
+            cube = cubes.extract(time_extract)
+            cubes = CubeList()
+            cubes.append(cube)
+    return cubes
+
+
+def data_from_dictionary(dictionary_data, key):
+    """
+    Check for an iris.cube.Cube of information in a data
+    dictionary, such as the ancillaries dictionary.
+
+    Args:
+    -----
+    dictionary_data : dict
+        Dictionary of data to be extracted.
+
+    key : string
+        Name of data field requested.
+
+    Returns:
+    --------
+    data extracted from the dictionary.
+
+    Raises:
+    -------
+    Exception if the key is not available in the dictionary.
+
+    """
+    if not isinstance(dictionary_data, dict):
+        raise TypeError('Invalid type sent to data_from_dictionary - '
+                        'Not a dictionary.')
+
+    if key in dictionary_data.keys():
+        return dictionary_data[key]
+
+    raise Exception('Data {} not found in dictionary.'.format(key))
diff --git a/lib/improver/spotdata/tests/test_neighbour_finding.py b/lib/improver/spotdata/tests/test_neighbour_finding.py
index 2f89b8928b..f88f1ec554 100644
--- a/lib/improver/spotdata/tests/test_neighbour_finding.py
+++ b/lib/improver/spotdata/tests/test_neighbour_finding.py
@@ -28,7 +28,7 @@
 # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 # POSSIBILITY OF SUCH DAMAGE.
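
A note on the new read_input module above: typical usage follows the sketch below. The file paths and diagnostic name are made-up placeholders, and ancillary_data stands for a dictionary such as the one returned by get_ancillary_data.

    from improver.spotdata.read_input import Load, data_from_dictionary

    # Load one diagnostic as a single cube, or a glob of files as a CubeList.
    temperature = Load('single_file').process(
        '/path/to/forecast.nc', 'air_temperature')
    cubes = Load('multi_file').process('/path/to/files/*.nc', None)

    # Pull a field out of a data dictionary; this raises an Exception if the
    # key is missing and a TypeError if a non-dict is passed in.
    orography = data_from_dictionary(ancillary_data, 'orography').data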
-"""Unit tests for the spotdata.NeighbourFinding plugin.""" +"""Unit tests for the spotdata.PointSelection plugin.""" import unittest @@ -42,9 +42,9 @@ from improver.spotdata.neighbour_finding import PointSelection -class TestNeighbourFinding(IrisTest): +class TestPointSelection(IrisTest): - """Test the neighbour finding plugin.""" + """Test the point selection (grid point neighbour finding) plugin.""" def setUp(self): """Create a cube containing a regular lat-lon grid.""" @@ -72,12 +72,12 @@ def setUp(self): ancillary_data.update({'land_mask': land}) sites = OrderedDict() - sites['100'] = { - 'latitude': 50, - 'longitude': 0, - 'altitude': 10, - 'gmtoffset': 0 - } + sites.update({'100': {'latitude': 50, + 'longitude': 0, + 'altitude': 10, + 'gmtoffset': 0 + } + }) neighbour_list = np.empty(1, dtype=[('i', 'i8'), ('j', 'i8'), @@ -98,7 +98,7 @@ def return_types(self, method, vertical_bias=None, land_constraint=False): def correct_neighbour(self, method, i_expected, j_expected, dz_expected, vertical_bias=None, land_constraint=False): - """Test that the plugin returns the expected neighbour""" + """Test that the plugin returns the expected neighbour.""" plugin = PointSelection(method, vertical_bias, land_constraint) result = plugin.process(self.cube, self.sites, self.ancillary_data) self.assertEqual(result['i'], i_expected) @@ -107,18 +107,18 @@ def correct_neighbour(self, method, i_expected, j_expected, dz_expected, def without_ancillary_data(self, method, vertical_bias=None, land_constraint=False): - """Test plugins behaviour with no ancillary data provided""" + """Test plugins behaviour with no ancillary data provided.""" plugin = PointSelection(method, vertical_bias, land_constraint) if method == 'fast_nearest_neighbour': result = plugin.process(self.cube, self.sites, {}) self.assertIsInstance(result, np.ndarray) else: - msg = 'Ancillary data' + msg = 'Data ' with self.assertRaisesRegexp(Exception, msg): result = plugin.process(self.cube, self.sites, {}) -class miscellaneous(TestNeighbourFinding): +class miscellaneous(TestPointSelection): def test_invalid_method(self): """ Test that the plugin can handle an invalid method being passed in. @@ -127,7 +127,7 @@ def test_invalid_method(self): plugin = PointSelection('smallest distance') msg = 'Unknown method' with self.assertRaisesRegexp(AttributeError, msg): - plugin.process(self.cube, self.sites, self.ancillary_data) + result = plugin.process(self.cube, self.sites, self.ancillary_data) def test_variable_no_neighbours(self): """ @@ -148,16 +148,21 @@ def test_variable_no_neighbours(self): self.assertEqual(result['dz'], 0.) def test_invalid_no_neighbours(self): + """ + Test use of a larger but invalid no of neighbours over which to find + the minimum vertical displacement. + + """ plugin = PointSelection(method='minimum_height_error_neighbour', vertical_bias=None, land_constraint=False) msg = 'Invalid nearest no' with self.assertRaisesRegexp(ValueError, msg): - plugin.process(self.cube, self.sites, self.ancillary_data, - no_neighbours=20) + result = plugin.process(self.cube, self.sites, self.ancillary_data, + no_neighbours=20) -class fast_nearest_neighbour(TestNeighbourFinding): +class fast_nearest_neighbour(TestPointSelection): ''' Tests for fast_nearest_neighbour method. No other conditions beyond proximity are considered. 
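
All of the test classes in this file drive the plugin through the same small pattern, sketched here with placeholder cube, sites and ancillary_data objects like those built in setUp:

    plugin = PointSelection(method='minimum_height_error_neighbour',
                            vertical_bias='below', land_constraint=True)
    # A structured numpy array with fields i, j, dz (and edgepoint).
    neighbours = plugin.process(cube, sites, ancillary_data)
    grid_i, grid_j, height_error = (
        neighbours['i'], neighbours['j'], neighbours['dz'])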
@@ -170,7 +175,7 @@ def test_return_type(self):
         self.return_types(self.method)

     def test_correct_neighbour(self):
-        '''Nearest neighbouring grid point with no other conditions'''
+        '''Nearest neighbouring grid point with no other conditions.'''
         self.correct_neighbour(self.method, 15, 10, 10.)

     def test_without_ancillary_data(self):
@@ -180,17 +185,18 @@ def test_without_ancillary_data(self):
         self.without_ancillary_data(self.method)


-class min_dz_no_bias(TestNeighbourFinding):
+class minimum_height_error_neighbour_no_bias(TestPointSelection):
     '''
-    Tests for min_dz_no_bias method. This method seeks to minimise
-    the vertical displacement between a site and the selected neigbouring
-    grid point. There is no bias as to whether dz is positive (grid point
+    Tests for the minimum_height_error neighbour method of point selection.
+    This method seeks to minimise the vertical displacement between a spotdata
+    site and a neighbouring grid point.
+
+    In this case there is no bias as to whether dz is positive (grid point
     below site) or dz is negative (grid point above site).

     '''
     method = 'minimum_height_error_neighbour'
-    # min_dz_no_bias

     def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
@@ -204,7 +210,7 @@ def test_without_ancillary_data(self):
         self.without_ancillary_data(self.method)

     def test_correct_neighbour_no_orography(self):
-        '''Nearest neighbouring grid point with no other conditions'''
+        '''Nearest neighbouring grid point with no other conditions.'''
         self.correct_neighbour(self.method, 15, 10, 10.)

     def test_correct_neighbour_orography(self):
@@ -241,18 +247,19 @@ def test_correct_neighbour_orography_unequal_displacement(self):
         self.correct_neighbour(self.method, 16, 10, -1.)


-class min_dz_biased_above(TestNeighbourFinding):
+class minimum_height_error_neighbour_bias_above(TestPointSelection):
     '''
-    Tests for min_dz_biased_above. This method seeks to minimise
-    the vertical displacement between a site and the selected neigbouring
-    grid point. There is a bias towards dz being negative (grid point above
+    Tests for the minimum_height_error neighbour method of point selection.
+    This method seeks to minimise the vertical displacement between a spotdata
+    site and a neighbouring grid point.
+
+    In this case there is a bias towards dz being negative (grid point ABOVE
     site), but if this condition cannot be met, a minimum positive dz
     (grid point below site) neighbour will be returned.

     '''
     method = 'minimum_height_error_neighbour'
-    # min_dz_biased_above

     def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
@@ -266,7 +273,7 @@ def test_without_ancillary_data(self):
         self.without_ancillary_data(self.method, vertical_bias='above')

     def test_correct_neighbour_no_orography(self):
-        '''Nearest neighbouring grid point with no other conditions'''
+        '''Nearest neighbouring grid point with no other conditions.'''
         self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='above')

     def test_correct_neighbour_orography(self):
@@ -309,18 +316,19 @@ def test_correct_neighbour_orography_unequal_displacement(self):
         self.correct_neighbour(self.method, 16, 10, -2., vertical_bias='above')


-class min_dz_biased_below(TestNeighbourFinding):
+class minimum_height_error_neighbour_bias_below(TestPointSelection):
     '''
-    Tests for min_dz_biased_below. This method seeks to minimise
-    the vertical displacement between a site and the selected neigbouring
-    grid point. There is a bias towards dz being positive (grid point below
+    Tests for the minimum_height_error neighbour method of point selection.
+    This method seeks to minimise the vertical displacement between a spotdata
+    site and a neighbouring grid point.
+
+    In this case there is a bias towards dz being positive (grid point
+    BELOW site), but if this condition cannot be met, a minimum negative dz
+    (grid point above site) neighbour will be returned.

     '''
     method = 'minimum_height_error_neighbour'
-    # min_dz_biased_below'

     def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
@@ -334,7 +342,7 @@ def test_without_ancillary_data(self):
         self.without_ancillary_data(self.method, vertical_bias='below')

     def test_correct_neighbour_no_orography(self):
-        '''Nearest neighbouring grid point with no other conditions'''
+        '''Nearest neighbouring grid point with no other conditions.'''
         self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='below')

     def test_correct_neighbour_orography(self):
@@ -377,22 +385,23 @@ def test_correct_neighbour_orography_unequal_displacement(self):
         self.correct_neighbour(self.method, 14, 10, 2., vertical_bias='below')


-class min_dz_land_no_bias(TestNeighbourFinding):
+class minimum_height_error_neighbour_land_no_bias(TestPointSelection):
     '''
-    Tests for min_dz_land_no_bias method. This method seeks to
-    minimise the vertical displacement between a site and the selected
-    neigbouring grid point. There is no bias as to whether dz is positive
-    (grid point below site) or dz is negative (grid point above site).
+    Tests for the minimum_height_error neighbour method of point selection.
+    This method seeks to minimise the vertical displacement between a spotdata
+    site and a neighbouring grid point.
+
+    In this case there is no bias as to whether dz is positive (grid point
+    below site) or dz is negative (grid point above site).

     A neighbouring grid point is REQUIRED to be a land point if the site's
     first guess nearest neighbour is a land point. If the first guess neighbour
-    is a sea point, the site is assumed to be a sea point as well the
+    is a sea point, the site is assumed to be a sea point as well and the
     neighbour point will not be changed.

     '''
     method = 'minimum_height_error_neighbour'
-    # min_dz_land_no_bias'

     def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
@@ -406,7 +415,7 @@ def test_without_ancillary_data(self):
         self.without_ancillary_data(self.method, land_constraint=True)

     def test_correct_neighbour_no_orography(self):
-        '''Nearest neighbouring grid point with no other conditions'''
+        '''Nearest neighbouring grid point with no other conditions.'''
         self.correct_neighbour(self.method, 15, 10, 10., land_constraint=True)

     def test_correct_neighbour_orography(self):
@@ -482,23 +491,24 @@ def test_correct_neighbour_orography_unequal_displacement_land(self):
         self.correct_neighbour(self.method, 14, 10, 2., land_constraint=True)


-class min_dz_land_biased_above(TestNeighbourFinding):
+class minimum_height_error_neighbour_land_bias_above(TestPointSelection):
     '''
-    Tests for min_dz_land_biased_above. This method seeks to minimise
-    the vertical displacement between a site and the selected neigbouring
-    grid point.
+    Tests for the minimum_height_error neighbour method of point selection.
+    This method seeks to minimise the vertical displacement between a spotdata
+    site and a neighbouring grid point.
+
+    In this case there is a bias towards dz being negative (grid point ABOVE
     site), but if this condition cannot be met, a minimum positive dz
     (grid point below site) neighbour will be returned.

     A neighbouring grid point is REQUIRED to be a land point if the site's
     first guess nearest neighbour is a land point. If the first guess neighbour
-    is a sea point, the site is assumed to be a sea point as well the
+    is a sea point, the site is assumed to be a sea point as well and the
     neighbour point will not be changed.

     '''
     method = 'minimum_height_error_neighbour'
-    # min_dz_land_biased_above'

     def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
@@ -514,7 +524,7 @@ def test_without_ancillary_data(self):
                                    land_constraint=True)

     def test_correct_neighbour_no_orography(self):
-        '''Nearest neighbouring grid point with no other conditions'''
+        '''Nearest neighbouring grid point with no other conditions.'''
         self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='above',
                                land_constraint=True)

@@ -608,23 +618,24 @@ def test_correct_neighbour_orography_unequal_displacement_land(self):
                               land_constraint=True)


-class min_dz_land_biased_below(TestNeighbourFinding):
+class minimum_height_error_neighbour_land_bias_below(TestPointSelection):
     '''
-    Tests for min_dz_land_biased_below. This method seeks to minimise
-    the vertical displacement between a site and the selected neigbouring
-    grid point. There is a bias towards dz being positive (grid point below
+    Tests for the minimum_height_error neighbour method of point selection.
+    This method seeks to minimise the vertical displacement between a spotdata
+    site and a neighbouring grid point.
+
+    In this case there is a bias towards dz being positive (grid point
+    BELOW site), but if this condition cannot be met, a minimum negative dz
+    (grid point above site) neighbour will be returned.
+
+    A neighbouring grid point is REQUIRED to be a land point if the site's
+    first guess nearest neighbour is a land point. If the first guess neighbour
+    is a sea point, the site is assumed to be a sea point as well and the
+    neighbour point will not be changed.

     '''
     method = 'minimum_height_error_neighbour'
-    # min_dz_land_biased_below'

     def test_return_type(self):
         '''Ensure a numpy array of the format expected is returned.'''
@@ -640,7 +651,7 @@ def test_without_ancillary_data(self):
                                    land_constraint=True)

     def test_correct_neighbour_no_orography(self):
-        '''Nearest neighbouring grid point with no other conditions'''
+        '''Nearest neighbouring grid point with no other conditions.'''
         self.correct_neighbour(self.method, 15, 10, 10., vertical_bias='below',
                                land_constraint=True)

From 8371e50f5b93397cf509efff8fd5afd3aa9f5ec0 Mon Sep 17 00:00:00 2001
From: "benjamin.ayliffe"
Date: Thu, 1 Jun 2017 10:37:52 +0100
Subject: [PATCH 0076/1367] Repeat an earlier fix. Oops.
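
A note on the vertical_bias options exercised by the neighbour-finding test classes above: the selection rule they describe can be pictured with a small sketch. This is a simplification of the documented behaviour of apply_bias in common_functions.py, not the plugin code itself; dz is the site altitude minus the grid-point altitude, so positive dz means the grid point sits below the site.

    import numpy as np

    def choose_neighbour(dzs, vertical_bias=None):
        '''Hypothetical sketch: index of the preferred grid point.'''
        dzs = np.asarray(dzs, dtype=float)
        if vertical_bias == 'above':
            candidates = np.nonzero(dzs <= 0)[0]   # grid points above site
        elif vertical_bias == 'below':
            candidates = np.nonzero(dzs >= 0)[0]   # grid points below site
        else:
            candidates = np.arange(dzs.size)
        if candidates.size == 0:
            # The preferred side cannot be met; fall back to all points.
            candidates = np.arange(dzs.size)
        # Minimise the vertical displacement among the remaining points.
        return candidates[np.argmin(np.abs(dzs[candidates]))]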
---
 lib/improver/spotdata/tests/test_neighbour_finding.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/lib/improver/spotdata/tests/test_neighbour_finding.py b/lib/improver/spotdata/tests/test_neighbour_finding.py
index f88f1ec554..426c031c46 100644
--- a/lib/improver/spotdata/tests/test_neighbour_finding.py
+++ b/lib/improver/spotdata/tests/test_neighbour_finding.py
@@ -115,7 +115,7 @@ def without_ancillary_data(self, method, vertical_bias=None,
         else:
             msg = 'Data '
             with self.assertRaisesRegexp(Exception, msg):
-                result = plugin.process(self.cube, self.sites, {})
+                plugin.process(self.cube, self.sites, {})
@@ -127,7 +127,7 @@ def test_invalid_method(self):
         plugin = PointSelection('smallest distance')
         msg = 'Unknown method'
         with self.assertRaisesRegexp(AttributeError, msg):
-            result = plugin.process(self.cube, self.sites, self.ancillary_data)
+            plugin.process(self.cube, self.sites, self.ancillary_data)

     def test_variable_no_neighbours(self):
         """
@@ -158,8 +158,8 @@ def test_invalid_no_neighbours(self):
                                land_constraint=False)
         msg = 'Invalid nearest no'
         with self.assertRaisesRegexp(ValueError, msg):
-            result = plugin.process(self.cube, self.sites, self.ancillary_data,
-                                    no_neighbours=20)
+            plugin.process(self.cube, self.sites, self.ancillary_data,
+                           no_neighbours=20)

From f6f489688f2dbb04d2dc536d9dd42768118e4757 Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Mon, 5 Jun 2017 10:10:32 +0100
Subject: [PATCH 0077/1367] Edits to make the help description of the
 radii-in-km-by-lead-time argument more explicit, amend the associated test
 and correct a docstring.

---
 bin/improver-nbhood                | 10 +++++++---
 lib/improver/nbhood.py             |  4 ++--
 tests/improver-nbhood/01-help.bats |  9 +++++++-
 3 files changed, 16 insertions(+), 7 deletions(-)

diff --git a/bin/improver-nbhood b/bin/improver-nbhood
index aea294081a..789a6ab619 100755
--- a/bin/improver-nbhood
+++ b/bin/improver-nbhood
@@ -52,9 +52,13 @@ def main():
                         help='The kernel radii for neighbourhood processing '
                         'and the associated lead times at which the radii are '
                         'valid. The radii are in km whilst the lead time '
-                        'has units of hours.'
-                        'The radii and lead times are expected as '
-                        'comma-separated lists e.g. 10,12,14.')
+                        'has units of hours. The radii and lead times are '
+                        'expected as individual comma-separated lists with '
+                        'the list of radii given first followed by a list of '
+                        'lead times to indicate at what lead time each radius '
+                        'should be used. For example: 10,12,14 1,2,3 '
+                        'where a lead time of 1 hour uses a radius of 10km, '
+                        'a lead time of 2 hours uses a radius of 12km, etc.')
     parser.add_argument('input_filepath', metavar='INPUT_FILE',
                         help='A path to an input NetCDF file to be processed')
     parser.add_argument('output_filepath', metavar='OUTPUT_FILE',
diff --git a/lib/improver/nbhood.py b/lib/improver/nbhood.py
index 8067a4961f..ba237326f1 100644
--- a/lib/improver/nbhood.py
+++ b/lib/improver/nbhood.py
@@ -211,8 +211,8 @@ def _get_grid_x_y_kernel_ranges(self, cube, radius_in_km):
     def _apply_kernel_for_smoothing(self, cube, ranges):
         """
-        Return the number of grid cells in the x and y direction
-        to be used to create the kernel.
+        Method to apply a kernel to the data within the input cube in order
+        to smooth the resulting field.
Parameters ---------- diff --git a/tests/improver-nbhood/01-help.bats b/tests/improver-nbhood/01-help.bats index 620a99e2ef..db5debe2c6 100644 --- a/tests/improver-nbhood/01-help.bats +++ b/tests/improver-nbhood/01-help.bats @@ -24,8 +24,13 @@ optional arguments: The kernel radii for neighbourhood processing and the associated lead times at which the radii are valid. The radii are in km whilst the lead time has units of - hours.The radii and lead times are expected as comma- - separated lists e.g. 10,12,14. + hours. The radii and lead times are expected as + individual comma-separated lists with the list of + radii given first followed by a list of lead times to + indicate at what lead time each radii should be used. + For example: 10,12,14 1,2,3 where a lead time of 1 + hour uses a radius of 10km, a lead time of 2 hours + uses a radius of 12km, etc. __HELP__ [[ "$output" == "$expected" ]] } From f1bd1ae4a6ef326cef371b173421494a460f59b7 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Mon, 5 Jun 2017 12:09:43 +0100 Subject: [PATCH 0078/1367] sphinx-apidoc runs automatically on build --- doc/source/conf.py | 25 ++++++++++++++---- ...emble_calibration.ensemble_calibration.rst | 7 ----- ...bration.ensemble_calibration_utilities.rst | 7 ----- doc/source/improver.ensemble_calibration.rst | 18 ------------- doc/source/improver.grids.osgb.rst | 7 ----- doc/source/improver.grids.rst | 17 ------------ doc/source/improver.nbhood.rst | 7 ----- doc/source/improver.rst | 26 ------------------- doc/source/improver.threshold.rst | 7 ----- doc/source/index.rst | 3 +-- doc/source/modules.rst | 7 ----- 11 files changed, 21 insertions(+), 110 deletions(-) delete mode 100644 doc/source/improver.ensemble_calibration.ensemble_calibration.rst delete mode 100644 doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst delete mode 100644 doc/source/improver.ensemble_calibration.rst delete mode 100644 doc/source/improver.grids.osgb.rst delete mode 100644 doc/source/improver.grids.rst delete mode 100644 doc/source/improver.nbhood.rst delete mode 100644 doc/source/improver.rst delete mode 100644 doc/source/improver.threshold.rst delete mode 100644 doc/source/modules.rst diff --git a/doc/source/conf.py b/doc/source/conf.py index 8d715a781b..b523fa6290 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -18,7 +18,10 @@ import os import sys -sys.path.insert(0, os.path.abspath('../../lib/')) + +SOURCE_DIR = os.path.abspath((os.path.join(os.curdir, '..', '..', 'lib'))) + +sys.path.insert(0, SOURCE_DIR) # -- General configuration ------------------------------------------------ @@ -64,9 +67,9 @@ # built documents. # # The short X.Y version. -version = u'1.0' +version = u'' # The full version, including alpha/beta/rc tags. -release = u'1.0' +release = u'' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -133,7 +136,6 @@ # documentation. # html_theme_options = { -'page_width': '1080px' # so 80 chars of code fit (default 940px) } # Add any paths that contain custom themes here, relative to this directory. @@ -142,7 +144,7 @@ # The name for this set of Sphinx documents. # " v documentation" by default. # -# html_title = u'Improver v1.0' +# html_title = u'Improver' # A shorter title for the navigation bar. Default is the same as html_title. # @@ -352,3 +354,16 @@ # Get napoleon to document constructor methods. 
napoleon_include_init_with_doc = True + +# Allow automatic running of sphinx-apidoc: +# Adapted from: https://github.com/rtfd/readthedocs.org/issues/1139 +def run_apidoc(_): + from sphinx.apidoc import main + import os + + output_dir = os.path.abspath(os.path.join(os.curdir, 'source')) + exclude_dir = os.path.join(SOURCE_DIR, 'improver', 'tests') + main(['-e', '-P', '-f', '-o', output_dir, SOURCE_DIR, exclude_dir]) + +def setup(app): + app.connect('builder-inited', run_apidoc) diff --git a/doc/source/improver.ensemble_calibration.ensemble_calibration.rst b/doc/source/improver.ensemble_calibration.ensemble_calibration.rst deleted file mode 100644 index 0c3378a74b..0000000000 --- a/doc/source/improver.ensemble_calibration.ensemble_calibration.rst +++ /dev/null @@ -1,7 +0,0 @@ -improver.ensemble_calibration.ensemble_calibration module -========================================================= - -.. automodule:: improver.ensemble_calibration.ensemble_calibration - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst b/doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst deleted file mode 100644 index bbd19c07ad..0000000000 --- a/doc/source/improver.ensemble_calibration.ensemble_calibration_utilities.rst +++ /dev/null @@ -1,7 +0,0 @@ -improver.ensemble_calibration.ensemble_calibration_utilities module -=================================================================== - -.. automodule:: improver.ensemble_calibration.ensemble_calibration_utilities - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/improver.ensemble_calibration.rst b/doc/source/improver.ensemble_calibration.rst deleted file mode 100644 index 05100adf26..0000000000 --- a/doc/source/improver.ensemble_calibration.rst +++ /dev/null @@ -1,18 +0,0 @@ -improver.ensemble_calibration package -===================================== - -Submodules ----------- - -.. toctree:: - - improver.ensemble_calibration.ensemble_calibration - improver.ensemble_calibration.ensemble_calibration_utilities - -Module contents ---------------- - -.. automodule:: improver.ensemble_calibration - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/improver.grids.osgb.rst b/doc/source/improver.grids.osgb.rst deleted file mode 100644 index 6733f3c434..0000000000 --- a/doc/source/improver.grids.osgb.rst +++ /dev/null @@ -1,7 +0,0 @@ -improver.grids.osgb module -========================== - -.. automodule:: improver.grids.osgb - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/improver.grids.rst b/doc/source/improver.grids.rst deleted file mode 100644 index 033f58985d..0000000000 --- a/doc/source/improver.grids.rst +++ /dev/null @@ -1,17 +0,0 @@ -improver.grids package -====================== - -Submodules ----------- - -.. toctree:: - - improver.grids.osgb - -Module contents ---------------- - -.. automodule:: improver.grids - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/improver.nbhood.rst b/doc/source/improver.nbhood.rst deleted file mode 100644 index 8e19c54135..0000000000 --- a/doc/source/improver.nbhood.rst +++ /dev/null @@ -1,7 +0,0 @@ -improver.nbhood module -====================== - -.. 
automodule:: improver.nbhood - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/improver.rst b/doc/source/improver.rst deleted file mode 100644 index eded451f1d..0000000000 --- a/doc/source/improver.rst +++ /dev/null @@ -1,26 +0,0 @@ -improver package -================ - -Subpackages ------------ - -.. toctree:: - - improver.ensemble_calibration - improver.grids - -Submodules ----------- - -.. toctree:: - - improver.nbhood - improver.threshold - -Module contents ---------------- - -.. automodule:: improver - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/improver.threshold.rst b/doc/source/improver.threshold.rst deleted file mode 100644 index 708c200fba..0000000000 --- a/doc/source/improver.threshold.rst +++ /dev/null @@ -1,7 +0,0 @@ -improver.threshold module -========================= - -.. automodule:: improver.threshold - :members: - :undoc-members: - :show-inheritance: diff --git a/doc/source/index.rst b/doc/source/index.rst index f95d10a808..2aff0836f7 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -6,8 +6,7 @@ Welcome to Improver's documentation! ==================================== -This should contain a link to the API documentation. -See :doc:`improver` +API documentation: :doc:`improver` Indices and tables diff --git a/doc/source/modules.rst b/doc/source/modules.rst deleted file mode 100644 index 1a1bcf8da2..0000000000 --- a/doc/source/modules.rst +++ /dev/null @@ -1,7 +0,0 @@ -improver -======== - -.. toctree:: - :maxdepth: 4 - - improver From d3ec8c1adfd45ca6dd0bc0232a89436e2882f4ca Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Mon, 5 Jun 2017 13:08:50 +0100 Subject: [PATCH 0079/1367] fixes to current path for Sphinx - RTD failed where local build worked --- doc/source/conf.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/doc/source/conf.py b/doc/source/conf.py index b523fa6290..6d4104b1e7 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -19,7 +19,10 @@ import os import sys -SOURCE_DIR = os.path.abspath((os.path.join(os.curdir, '..', '..', 'lib'))) +SOURCE_DIR = os.path.abspath(os.path.join( + os.path.dirname(os.path.abspath(__file__)), + '..', '..', 'lib' + )) sys.path.insert(0, SOURCE_DIR) @@ -355,15 +358,17 @@ # Get napoleon to document constructor methods. napoleon_include_init_with_doc = True + # Allow automatic running of sphinx-apidoc: # Adapted from: https://github.com/rtfd/readthedocs.org/issues/1139 def run_apidoc(_): from sphinx.apidoc import main import os - output_dir = os.path.abspath(os.path.join(os.curdir, 'source')) + output_dir = os.path.dirname(os.path.abspath(__file__)) exclude_dir = os.path.join(SOURCE_DIR, 'improver', 'tests') main(['-e', '-P', '-f', '-o', output_dir, SOURCE_DIR, exclude_dir]) + def setup(app): app.connect('builder-inited', run_apidoc) From 38499e7274a8af20f53087e5c52653765aa2f3b6 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Mon, 5 Jun 2017 13:31:51 +0100 Subject: [PATCH 0080/1367] Test documentation build w/ Travis --- bin/improver-tests | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/bin/improver-tests b/bin/improver-tests index ee8f7cdeac..014f83b6e7 100755 --- a/bin/improver-tests +++ b/bin/improver-tests @@ -22,7 +22,7 @@ if [[ ${1:-} == '--help' ]] || [[ ${1:-} == '-h' ]]; then cat <<'__USAGE__' improver tests [--debug] -Run pep8, pylint, unit and CLI acceptance tests. +Run pep8, pylint, documentation, unit and CLI acceptance tests. 
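The essence of the conf.py path fix above, restated as a standalone comparison (illustrative only):

    import os

    # Resolving against the process working directory only works when
    # sphinx-build happens to be launched from doc/source, which is not
    # guaranteed (e.g. on Read the Docs):
    fragile = os.path.abspath(os.path.join(os.curdir, '..', '..', 'lib'))

    # Anchoring on the location of conf.py itself gives the same answer
    # from any working directory:
    robust = os.path.abspath(os.path.join(
        os.path.dirname(os.path.abspath(__file__)), '..', '..', 'lib'))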
Optional arguments: --debug Run in verbose mode (may take longer for CLI) @@ -45,6 +45,12 @@ echo_ok "pep8" ${PYLINT:-pylint} -E --rcfile=../etc/pylintrc improver echo_ok "pylint -E" +# Build documentation as test. +cd $IMPROVER_DIR/doc +make html +echo_ok "sphinx-build -b html" +cd - + # Unit tests. python -m unittest discover echo_ok "Unit tests" From 6ac5be15c9265cb03c8f15cc13465aea6b88fe2d Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Mon, 5 Jun 2017 14:03:06 +0100 Subject: [PATCH 0081/1367] Updated acknowledgements --- ACKNOWLEDGEMENTS.md | 5 +++++ environment.yml | 3 ++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/ACKNOWLEDGEMENTS.md b/ACKNOWLEDGEMENTS.md index 891226b86d..bfe9de717c 100644 --- a/ACKNOWLEDGEMENTS.md +++ b/ACKNOWLEDGEMENTS.md @@ -3,6 +3,11 @@ Credit for external pieces of work: Iris (https://github.com/SciTools/iris), LGPL: - .travis.yml, derived from Iris travis setup - .gitignore used as basis for ours + - environment.yml derived from Iris minimal conda environment BATS (https://github.com/sstephenson/bats), MIT-style: - tests/bin/bats\*, unaltered, from commit 0360811 + +Automated generation of API documentation from sphinx-build: + - Based on solution posted by BowenFu: + https://github.com/rtfd/readthedocs.org/issues/1139 diff --git a/environment.yml b/environment.yml index 09edba2ee2..a8724a85ef 100644 --- a/environment.yml +++ b/environment.yml @@ -1,4 +1,5 @@ -# Taken from iris: https://github.com/SciTools/iris/ +# Based on minimal-conda-requirements.txt from iris: +# https://github.com/SciTools/iris/ # Use this file to create a conda environment using: # conda create -n --file environment.yml From b08f72de98ee0881ee61ad40c930439ee7e09d0c Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Mon, 5 Jun 2017 14:46:13 +0100 Subject: [PATCH 0082/1367] Simplified Sphinx Makefile and updated Acknowledgements --- doc/Makefile | 160 +-------------------------------------------------- 1 file changed, 1 insertion(+), 159 deletions(-) diff --git a/doc/Makefile b/doc/Makefile index ff70b70f3a..d64596964c 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -17,28 +17,9 @@ I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source .PHONY: help help: @echo "Please use \`make ' where is one of" + @echo " apidoc to generate .rst files for documentation" @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " applehelp to make an Apple Help Book" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " epub3 to make an epub3" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" @echo " linkcheck to check all external 
links for integrity" @echo " doctest to run all doctests embedded in the documentation (if enabled)" @echo " coverage to run coverage check of the documentation (if enabled)" @@ -58,139 +39,12 @@ html: @echo @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." -.PHONY: dirhtml -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - .PHONY: singlehtml singlehtml: $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml @echo @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." -.PHONY: pickle -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -.PHONY: json -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -.PHONY: htmlhelp -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -.PHONY: qthelp -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Improver.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Improver.qhc" - -.PHONY: applehelp -applehelp: - $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp - @echo - @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." - @echo "N.B. You won't be able to view it unless you put it in" \ - "~/Library/Documentation/Help or install it in your application" \ - "bundle." - -.PHONY: devhelp -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/Improver" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Improver" - @echo "# devhelp" - -.PHONY: epub -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." - -.PHONY: epub3 -epub3: - $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 - @echo - @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." - -.PHONY: latex -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -.PHONY: latexpdf -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: latexpdfja -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -.PHONY: text -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." 
- -.PHONY: man -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -.PHONY: texinfo -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -.PHONY: info -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -.PHONY: gettext -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -.PHONY: changes -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - .PHONY: linkcheck linkcheck: $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck @@ -210,18 +64,6 @@ coverage: @echo "Testing of coverage in the sources finished, look at the " \ "results in $(BUILDDIR)/coverage/python.txt." -.PHONY: xml -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -.PHONY: pseudoxml -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." - .PHONY: dummy dummy: $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy From a9f88a21abd7ce9c29321348fd3b02e864b4c616 Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Mon, 5 Jun 2017 15:36:42 +0100 Subject: [PATCH 0083/1367] Trying to appease codacy, pylint and pep8 simultaneously, which is harder than you might think. --- lib/improver/spotdata/common_functions.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/improver/spotdata/common_functions.py b/lib/improver/spotdata/common_functions.py index d07083476b..3203e83621 100644 --- a/lib/improver/spotdata/common_functions.py +++ b/lib/improver/spotdata/common_functions.py @@ -352,8 +352,8 @@ def apply_bias(vertical_bias, dzs): elif vertical_bias == 'below': dz_subset, = np.where(dzs >= 0) - if (vertical_bias is None or len(dz_subset) == 0 - or len(dz_subset) == len(dzs)): + if (vertical_bias is None or len(dz_subset) == 0 or + len(dz_subset) == len(dzs)): dz_subset = np.arange(len(dzs)) return dz_subset From 1cd169ba142e90d654bea6c47066bbc9171f31cd Mon Sep 17 00:00:00 2001 From: "benjamin.ayliffe" Date: Mon, 5 Jun 2017 15:37:51 +0100 Subject: [PATCH 0084/1367] Newline added back to the end of constants. 
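For reference, the apply_bias behaviour touched by the pep8 change above, condensed into a runnable sketch; the 'above' branch is inferred by symmetry from the 'below' branch shown in the diff:

    import numpy as np

    def apply_bias(vertical_bias, dzs):
        # Keep only displacements matching the requested bias; fall back
        # to every index when no bias is given, the bias cannot be met,
        # or the bias excludes nothing.
        dz_subset = np.array([], dtype=int)
        if vertical_bias == 'above':
            dz_subset, = np.where(dzs <= 0)
        elif vertical_bias == 'below':
            dz_subset, = np.where(dzs >= 0)
        if (vertical_bias is None or len(dz_subset) == 0 or
                len(dz_subset) == len(dzs)):
            dz_subset = np.arange(len(dzs))
        return dz_subset

    print(apply_bias('below', np.array([-3., 1., 4.])))  # [1 2]
    print(apply_bias('below', np.array([-3., -1.])))     # [0 1] (fallback)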
--- lib/improver/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/improver/constants.py b/lib/improver/constants.py index 4d2994b42f..1a454a1c91 100644 --- a/lib/improver/constants.py +++ b/lib/improver/constants.py @@ -37,4 +37,4 @@ R_DRY_AIR = 287.0 # Specific heat capacity of dry air (J K-1 kg-1) -CP_DRY_AIR = 1005.0 \ No newline at end of file +CP_DRY_AIR = 1005.0 From 8693ca9766da495e77501d6c79aa347e6620870e Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 26 Apr 2017 15:27:02 +0100 Subject: [PATCH 0085/1367] Separate out Ensemble Copula Coupling plugins into a separate file, as this will require extending beyond the initial scope of the application to ensemble calibration output. --- .../ensemble_calibration.py | 316 --------------- lib/improver/ensemble_copula_coupling.py | 367 ++++++++++++++++++ ...ble_copula_coupling_EnsembleReordering.py} | 7 +- ...GeneratePercentilesFromMeanAndVariance.py} | 5 +- 4 files changed, 372 insertions(+), 323 deletions(-) create mode 100644 lib/improver/ensemble_copula_coupling.py rename lib/improver/tests/{test_ensemble_calibration_EnsembleReordering.py => test_ensemble_copula_coupling_EnsembleReordering.py} (98%) rename lib/improver/tests/{test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py => test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py} (99%) diff --git a/lib/improver/ensemble_calibration/ensemble_calibration.py b/lib/improver/ensemble_calibration/ensemble_calibration.py index 35d39d6531..135f7f8ab5 100644 --- a/lib/improver/ensemble_calibration/ensemble_calibration.py +++ b/lib/improver/ensemble_calibration/ensemble_calibration.py @@ -34,7 +34,6 @@ """ import copy import numpy as np -import random from scipy import stats from scipy.optimize import minimize from scipy.stats import norm @@ -1101,318 +1100,3 @@ def format_calibration_method(calibration_method): calibrated_forecast_predictor_and_variance = iris.cube.CubeList([ calibrated_forecast_predictor, calibrated_forecast_variance]) return calibrated_forecast_predictor_and_variance - - -class GeneratePercentilesFromMeanAndVariance(object): - """ - Plugin focussing on generating percentiles from mean and variance. - In combination with the EnsembleReordering plugin, this is Ensemble - Copula Coupling. - """ - - def __init__(self): - """Initialise the class.""" - pass - - def _create_cube_with_percentiles( - self, percentiles, template_cube, cube_data): - """ - Create a cube with a percentile coordinate based on a template cube. - - Parameters - ---------- - percentiles : List - Ensemble percentiles. - template_cube : Iris cube - Cube to copy majority of coordinate definitions from. - cube_data : Numpy array - Data to insert into the template cube. - The data is expected to have the shape of - percentiles (0th dimension), time (1st dimension), - y_coord (2nd dimension), x_coord (3rd dimension). - - Returns - ------- - String - Coordinate name of the matched coordinate. 
- - """ - percentile_coord = iris.coords.DimCoord( - np.float32(percentiles), long_name="percentile", - units=unit.Unit("1"), var_name="percentile") - - time_coord = template_cube.coord("time") - y_coord = template_cube.coord(axis="y") - x_coord = template_cube.coord(axis="x") - - dim_coords_and_dims = [ - (percentile_coord, 0), (time_coord, 1), - (y_coord, 2), (x_coord, 3)] - - frt_coord = template_cube.coord("forecast_reference_time") - fp_coord = template_cube.coord("forecast_period") - aux_coords_and_dims = [(frt_coord, 1), (fp_coord, 1)] - - metadata_dict = copy.deepcopy(template_cube.metadata._asdict()) - - cube = iris.cube.Cube( - cube_data, dim_coords_and_dims=dim_coords_and_dims, - aux_coords_and_dims=aux_coords_and_dims, **metadata_dict) - cube.attributes = template_cube.attributes - cube.cell_methods = template_cube.cell_methods - return cube - - def _mean_and_variance_to_percentiles( - self, calibrated_forecast_predictor, calibrated_forecast_variance, - percentiles): - """ - Function returning percentiles based on the supplied - mean and variance. The percentiles are created by assuming a - Gaussian distribution and calculating the value of the phenomenon at - specific points within the distribution. - - Parameters - ---------- - calibrated_forecast_predictor : cube - Predictor for the calibrated forecast i.e. the mean. - calibrated_forecast_variance : cube - Variance for the calibrated forecast. - percentiles : List - Percentiles at which to calculate the value of the phenomenon at. - - Returns - ------- - percentile_cube : Iris cube - Cube containing the values for the phenomenon at each of the - percentiles requested. - - """ - if not calibrated_forecast_predictor.coord_dims("time"): - calibrated_forecast_predictor = iris.util.new_axis( - calibrated_forecast_predictor, "time") - if not calibrated_forecast_variance.coord_dims("time"): - calibrated_forecast_variance = iris.util.new_axis( - calibrated_forecast_variance, "time") - - calibrated_forecast_predictor_data = ( - calibrated_forecast_predictor.data.flatten()) - calibrated_forecast_variance_data = ( - calibrated_forecast_variance.data.flatten()) - - result = np.zeros((calibrated_forecast_predictor_data.shape[0], - len(percentiles))) - - # Loop over percentiles, and use a normal distribution with the mean - # and variance to calculate the values at each percentile. - for index, percentile in enumerate(percentiles): - percentile_list = np.repeat( - percentile, len(calibrated_forecast_predictor_data)) - result[:, index] = norm.ppf( - percentile_list, loc=calibrated_forecast_predictor_data, - scale=np.sqrt(calibrated_forecast_variance_data)) - # If percent point function (PPF) returns NaNs, fill in - # mean instead of NaN values. NaN will only be generated if the - # variance is zero. Therefore, if the variance is zero, the mean - # value is used for all gridpoints with a NaN. - if np.any(calibrated_forecast_variance_data == 0): - nan_index = np.argwhere(np.isnan(result[:, index])) - result[nan_index, index] = ( - calibrated_forecast_predictor_data[nan_index]) - if np.any(np.isnan(result)): - msg = ("NaNs are present within the result for the {} " - "percentile. 
Unable to calculate the percent point " - "function.") - raise ValueError(msg) - - result = result.T - - t_coord = calibrated_forecast_predictor.coord("time") - y_coord = calibrated_forecast_predictor.coord(axis="y") - x_coord = calibrated_forecast_predictor.coord(axis="x") - - result = result.reshape( - len(percentiles), len(t_coord.points), len(y_coord.points), - len(x_coord.points)) - percentile_cube = self._create_cube_with_percentiles( - percentiles, calibrated_forecast_predictor, result) - - percentile_cube.cell_methods = {} - return percentile_cube - - def _create_percentiles( - self, no_of_percentiles, sampling="quantile"): - """ - Function to create percentiles. - - Parameters - ---------- - no_of_percentiles : Int - Number of percentiles. - sampling : String - Type of sampling of the distribution to produce a set of - percentiles e.g. quantile or random. - Accepted options for sampling are: - Quantile: A regular set of equally-spaced percentiles aimed - at dividing a Cumulative Distribution Function into - blocks of equal probability. - Random: A random set of ordered percentiles. - - For further details, Flowerdew, J., 2014. - Calibrating ensemble reliability whilst preserving spatial structure. - Tellus, Series A: Dynamic Meteorology and Oceanography, 66(1), pp.1-20. - Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. - Uncertainty Quantification in Complex Simulation Models Using Ensemble - Copula Coupling. - Statistical Science, 28(4), pp.616-640. - - Returns - ------- - percentiles : List - Percentiles calculated using the sampling technique specified. - - """ - if sampling in ["quantile"]: - percentiles = np.linspace( - 1/float(1+no_of_percentiles), - no_of_percentiles/float(1+no_of_percentiles), - no_of_percentiles).tolist() - elif sampling in ["random"]: - percentiles = [] - for _ in range(no_of_percentiles): - percentiles.append( - random.uniform( - 1/float(1+no_of_percentiles), - no_of_percentiles/float(1+no_of_percentiles))) - percentiles = sorted(percentiles) - else: - msg = "The {} sampling option is not yet implemented.".format( - sampling) - raise ValueError(msg) - return percentiles - - def process(self, calibrated_forecast_predictor_and_variance, - raw_forecast): - """ - Generate ensemble percentiles from the mean and variance. - - Parameters - ---------- - calibrated_forecast_predictor_and_variance : Iris CubeList - CubeList containing the calibrated forecast predictor and - calibrated forecast variance. - raw_forecast : Iris Cube or CubeList - Cube or CubeList that is expected to be the raw - (uncalibrated) forecast. - Returns - ------- - calibrated_forecast_percentiles : Iris cube - Cube for calibrated percentiles. 
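A worked single-point example of what this plugin computes: equally-spaced percentiles at i/(n+1), then the Gaussian percent point function evaluated at each; the mean and variance values here are illustrative:

    import numpy as np
    from scipy.stats import norm

    n = 3
    percentiles = np.linspace(1. / (n + 1), n / float(n + 1), n)
    # -> [0.25, 0.5, 0.75]

    mean, variance = 280.0, 4.0  # illustrative grid-point values
    values = norm.ppf(percentiles, loc=mean, scale=np.sqrt(variance))
    # -> approximately [278.65, 280.0, 281.35]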
- - """ - (calibrated_forecast_predictor, calibrated_forecast_variance) = ( - calibrated_forecast_predictor_and_variance) - - calibrated_forecast_predictor = concatenate_cubes( - calibrated_forecast_predictor) - calibrated_forecast_variance = concatenate_cubes( - calibrated_forecast_variance) - rename_coordinate( - raw_forecast, "ensemble_member_id", "realization") - raw_forecast_members = concatenate_cubes(raw_forecast) - - no_of_percentiles = len( - raw_forecast_members.coord("realization").points) - - percentiles = self._create_percentiles(no_of_percentiles) - calibrated_forecast_percentiles = ( - self._mean_and_variance_to_percentiles( - calibrated_forecast_predictor, - calibrated_forecast_variance, - percentiles)) - - return calibrated_forecast_percentiles - - -class EnsembleReordering(object): - """ - Plugin for applying the reordering step of Ensemble Copula Coupling, - in order to generate ensemble members from percentiles. - The percentiles are assumed to be in ascending order. - - Reference: - Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. - Uncertainty Quantification in Complex Simulation Models Using Ensemble - Copula Coupling. - Statistical Science, 28(4), pp.616-640. - - """ - def __init__(self): - """Initialise the class.""" - pass - - def rank_ecc(self, calibrated_forecast_percentiles, raw_forecast_members): - """ - Function to apply Ensemble Copula Coupling. This ranks the calibrated - forecast members based on a ranking determined from the raw forecast - members. - - Parameters - ---------- - calibrated_forecast_percentiles : cube - Cube for calibrated percentiles. The percentiles are assumed to be - in ascending order. - raw_forecast_members : cube - Cube containing the raw (uncalibrated) forecasts. - - Returns - ------- - Iris cube - Cube for calibrated members where at a particular grid point, - the ranking of the values within the ensemble matches the ranking - from the raw ensemble. - - """ - results = iris.cube.CubeList([]) - for rawfc, calfc in zip( - raw_forecast_members.slices_over("time"), - calibrated_forecast_percentiles.slices_over("time")): - random_data = np.random.random(rawfc.data.shape) - # Lexsort returns the indices sorted firstly by the primary key, - # the raw forecast data, and secondly by the secondary key, an - # array of random data, in order to split tied values randomly. - sorting_index = np.lexsort((random_data, rawfc.data), axis=0) - # Returns the indices that would sort the array. - ranking = np.argsort(sorting_index, axis=0) - # Index the calibrated forecast data using the ranking array. - # np.choose allows indexing of a 3d array using a 3d array, - calfc.data = np.choose(ranking, calfc.data) - results.append(calfc) - return concatenate_cubes(results) - - def process(self, calibrated_forecast, raw_forecast): - """ - Parameters - ---------- - calibrated_forecast : Iris Cube or CubeList - The cube or cubelist containing the calibrated forecast members. - raw_forecast : Iris Cube or CubeList - The cube or cubelist containing the raw (uncalibrated) forecast. - - Returns - ------- - calibrated_forecast_members : cube - Cube for a new ensemble member where all points within the dataset - are representative of a specified probability threshold across the - whole domain. 
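The ranking step in rank_ecc above, demonstrated on a single grid point with three members (one-dimensional for clarity):

    import numpy as np

    raw = np.array([3., 1., 2.])    # raw members at one grid point
    cal = np.array([9., 10., 11.])  # calibrated percentiles, ascending

    noise = np.random.random(raw.shape)               # tie-breaker only
    sorting_index = np.lexsort((noise, raw), axis=0)  # sorts raw ascending
    ranking = np.argsort(sorting_index, axis=0)       # rank of each member
    print(np.choose(ranking, cal))
    # -> [11.  9. 10.]: the largest raw member receives the largest
    # calibrated value, so the raw ensemble's ordering is preserved.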
- """ - rename_coordinate( - raw_forecast, "ensemble_member_id", "realization") - calibrated_forecast_percentiles = concatenate_cubes( - calibrated_forecast, - coords_to_slice_over=["percentile", "time"]) - raw_forecast_members = concatenate_cubes(raw_forecast) - calibrated_forecast_members = self.rank_ecc( - calibrated_forecast_percentiles, raw_forecast_members) - rename_coordinate( - calibrated_forecast_members, "percentile", "realization") - return calibrated_forecast_members diff --git a/lib/improver/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling.py new file mode 100644 index 0000000000..cc07fb6125 --- /dev/null +++ b/lib/improver/ensemble_copula_coupling.py @@ -0,0 +1,367 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +""" +This module defines the plugins required for Ensemble Copula Coupling. + +""" +import copy +import numpy as np +import random +from scipy.stats import norm + +import cf_units as unit +import iris + +from ensemble_calibration_utilities import ( + concatenate_cubes, rename_coordinate) + + +class GeneratePercentilesFromMeanAndVariance(object): + """ + Plugin focussing on generating percentiles from mean and variance. + In combination with the EnsembleReordering plugin, this is Ensemble + Copula Coupling. + """ + + def __init__(self, calibrated_forecast_predictor_and_variance, + raw_forecast): + """ + Initialise the class. + + Parameters + ---------- + calibrated_forecast_predictor_and_variance : Iris CubeList + CubeList containing the calibrated forecast predictor and + calibrated forecast variance. + raw_forecast : Iris Cube or CubeList + Cube or CubeList that is expected to be the raw + (uncalibrated) forecast. 
+ + """ + (self.calibrated_forecast_predictor, + self.calibrated_forecast_variance) = ( + calibrated_forecast_predictor_and_variance) + self.raw_forecast = raw_forecast + + def _create_cube_with_percentiles( + self, percentiles, template_cube, cube_data): + """ + Create a cube with a percentile coordinate based on a template cube. + + Parameters + ---------- + percentiles : List + Ensemble percentiles. + template_cube : Iris cube + Cube to copy majority of coordinate definitions from. + cube_data : Numpy array + Data to insert into the template cube. + The data is expected to have the shape of + percentiles (0th dimension), time (1st dimension), + y_coord (2nd dimension), x_coord (3rd dimension). + + Returns + ------- + String + Coordinate name of the matched coordinate. + + """ + percentile_coord = iris.coords.DimCoord( + np.float32(percentiles), long_name="percentile", + units=unit.Unit("1"), var_name="percentile") + + time_coord = template_cube.coord("time") + y_coord = template_cube.coord(axis="y") + x_coord = template_cube.coord(axis="x") + + dim_coords_and_dims = [ + (percentile_coord, 0), (time_coord, 1), + (y_coord, 2), (x_coord, 3)] + + frt_coord = template_cube.coord("forecast_reference_time") + fp_coord = template_cube.coord("forecast_period") + aux_coords_and_dims = [(frt_coord, 1), (fp_coord, 1)] + + metadata_dict = copy.deepcopy(template_cube.metadata._asdict()) + + cube = iris.cube.Cube( + cube_data, dim_coords_and_dims=dim_coords_and_dims, + aux_coords_and_dims=aux_coords_and_dims, **metadata_dict) + cube.attributes = template_cube.attributes + cube.cell_methods = template_cube.cell_methods + return cube + + def _mean_and_variance_to_percentiles( + self, calibrated_forecast_predictor, calibrated_forecast_variance, + percentiles): + """ + Function returning percentiles based on the supplied + mean and variance. The percentiles are created by assuming a + Gaussian distribution and calculating the value of the phenomenon at + specific points within the distribution. + + Parameters + ---------- + calibrated_forecast_predictor : cube + Predictor for the calibrated forecast i.e. the mean. + calibrated_forecast_variance : cube + Variance for the calibrated forecast. + percentiles : List + Percentiles at which to calculate the value of the phenomenon at. + + Returns + ------- + percentile_cube : Iris cube + Cube containing the values for the phenomenon at each of the + percentiles requested. + + """ + if not calibrated_forecast_predictor.coord_dims("time"): + calibrated_forecast_predictor = iris.util.new_axis( + calibrated_forecast_predictor, "time") + if not calibrated_forecast_variance.coord_dims("time"): + calibrated_forecast_variance = iris.util.new_axis( + calibrated_forecast_variance, "time") + + calibrated_forecast_predictor_data = ( + calibrated_forecast_predictor.data.flatten()) + calibrated_forecast_variance_data = ( + calibrated_forecast_variance.data.flatten()) + + result = np.zeros((calibrated_forecast_predictor_data.shape[0], + len(percentiles))) + + # Loop over percentiles, and use a normal distribution with the mean + # and variance to calculate the values at each percentile. + for index, percentile in enumerate(percentiles): + percentile_list = np.repeat( + percentile, len(calibrated_forecast_predictor_data)) + result[:, index] = norm.ppf( + percentile_list, loc=calibrated_forecast_predictor_data, + scale=np.sqrt(calibrated_forecast_variance_data)) + # If percent point function (PPF) returns NaNs, fill in + # mean instead of NaN values. 
NaN will only be generated if the + # variance is zero. Therefore, if the variance is zero, the mean + # value is used for all gridpoints with a NaN. + if np.any(calibrated_forecast_variance_data == 0): + nan_index = np.argwhere(np.isnan(result[:, index])) + result[nan_index, index] = ( + calibrated_forecast_predictor_data[nan_index]) + if np.any(np.isnan(result)): + msg = ("NaNs are present within the result for the {} " + "percentile. Unable to calculate the percent point " + "function.") + raise ValueError(msg) + + result = result.T + + t_coord = calibrated_forecast_predictor.coord("time") + y_coord = calibrated_forecast_predictor.coord(axis="y") + x_coord = calibrated_forecast_predictor.coord(axis="x") + + result = result.reshape( + len(percentiles), len(t_coord.points), len(y_coord.points), + len(x_coord.points)) + percentile_cube = self._create_cube_with_percentiles( + percentiles, calibrated_forecast_predictor, result) + + percentile_cube.cell_methods = {} + return percentile_cube + + def _create_percentiles( + self, no_of_percentiles, sampling="quantile"): + """ + Function to create percentiles. + + Parameters + ---------- + no_of_percentiles : Int + Number of percentiles. + sampling : String + Type of sampling of the distribution to produce a set of + percentiles e.g. quantile or random. + Accepted options for sampling are: + Quantile: A regular set of equally-spaced percentiles aimed + at dividing a Cumulative Distribution Function into + blocks of equal probability. + Random: A random set of ordered percentiles. + + For further details, Flowerdew, J., 2014. + Calibrating ensemble reliability whilst preserving spatial structure. + Tellus, Series A: Dynamic Meteorology and Oceanography, 66(1), pp.1-20. + Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. + Uncertainty Quantification in Complex Simulation Models Using Ensemble + Copula Coupling. + Statistical Science, 28(4), pp.616-640. + + Returns + ------- + percentiles : List + Percentiles calculated using the sampling technique specified. + + """ + if sampling in ["quantile"]: + percentiles = np.linspace( + 1/float(1+no_of_percentiles), + no_of_percentiles/float(1+no_of_percentiles), + no_of_percentiles).tolist() + elif sampling in ["random"]: + percentiles = [] + for _ in range(no_of_percentiles): + percentiles.append( + random.uniform( + 1/float(1+no_of_percentiles), + no_of_percentiles/float(1+no_of_percentiles))) + percentiles = sorted(percentiles) + else: + msg = "The {} sampling option is not yet implemented.".format( + sampling) + raise ValueError(msg) + return percentiles + + def process(self): + """ + Generate ensemble percentiles from the mean and variance. + + Returns + ------- + calibrated_forecast_percentiles : Iris cube + Cube for calibrated percentiles. 
+ + """ + raw_forecast = self.raw_forecast + + calibrated_forecast_predictor = concatenate_cubes( + self.calibrated_forecast_predictor) + calibrated_forecast_variance = concatenate_cubes( + self.calibrated_forecast_variance) + rename_coordinate( + self.raw_forecast, "ensemble_member_id", "realization") + raw_forecast_members = concatenate_cubes(self.raw_forecast) + + no_of_percentiles = len( + raw_forecast_members.coord("realization").points) + + percentiles = self._create_percentiles(no_of_percentiles) + calibrated_forecast_percentiles = ( + self._mean_and_variance_to_percentiles( + calibrated_forecast_predictor, + calibrated_forecast_variance, + percentiles)) + + return calibrated_forecast_percentiles + + +class EnsembleReordering(object): + """ + Plugin for applying the reordering step of Ensemble Copula Coupling, + in order to generate ensemble members from percentiles. + The percentiles are assumed to be in ascending order. + + Reference: + Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. + Uncertainty Quantification in Complex Simulation Models Using Ensemble + Copula Coupling. + Statistical Science, 28(4), pp.616-640. + + """ + def __init__(self, calibrated_forecast, raw_forecast): + """ + Parameters + ---------- + calibrated_forecast : Iris Cube or CubeList + The cube or cubelist containing the calibrated forecast members. + raw_forecast : Iris Cube or CubeList + The cube or cubelist containing the raw (uncalibrated) forecast. + + """ + self.calibrated_forecast = calibrated_forecast + self.raw_forecast = raw_forecast + + def rank_ecc(self, calibrated_forecast_percentiles, raw_forecast_members): + """ + Function to apply Ensemble Copula Coupling. This ranks the calibrated + forecast members based on a ranking determined from the raw forecast + members. + + Parameters + ---------- + calibrated_forecast_percentiles : cube + Cube for calibrated percentiles. The percentiles are assumed to be + in ascending order. + raw_forecast_members : cube + Cube containing the raw (uncalibrated) forecasts. + + Returns + ------- + Iris cube + Cube for calibrated members where at a particular grid point, + the ranking of the values within the ensemble matches the ranking + from the raw ensemble. + + """ + results = iris.cube.CubeList([]) + for rawfc, calfc in zip( + raw_forecast_members.slices_over("time"), + calibrated_forecast_percentiles.slices_over("time")): + random_data = np.random.random(rawfc.data.shape) + # Lexsort returns the indices sorted firstly by the primary key, + # the raw forecast data, and secondly by the secondary key, an + # array of random data, in order to split tied values randomly. + sorting_index = np.lexsort((random_data, rawfc.data), axis=0) + # Returns the indices that would sort the array. + ranking = np.argsort(sorting_index, axis=0) + # Index the calibrated forecast data using the ranking array. + # np.choose allows indexing of a 3d array using a 3d array, + calfc.data = np.choose(ranking, calfc.data) + results.append(calfc) + return concatenate_cubes(results) + + def process(self): + """ + Returns + ------- + calibrated_forecast_members : cube + Cube for a new ensemble member where all points within the dataset + are representative of a specified probability threshold across the + whole domain. 
+ """ + rename_coordinate( + self.raw_forecast, "ensemble_member_id", "realization") + calibrated_forecast_percentiles = concatenate_cubes( + self.calibrated_forecast, + coords_to_slice_over=["percentile", "time"]) + raw_forecast_members = concatenate_cubes(self.raw_forecast) + calibrated_forecast_members = self.rank_ecc( + calibrated_forecast_percentiles, raw_forecast_members) + rename_coordinate( + calibrated_forecast_members, "percentile", "realization") + return calibrated_forecast_members diff --git a/lib/improver/tests/test_ensemble_calibration_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py similarity index 98% rename from lib/improver/tests/test_ensemble_calibration_EnsembleReordering.py rename to lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 422a4dd153..76971516f8 100644 --- a/lib/improver/tests/test_ensemble_calibration_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -29,8 +29,8 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ -Unit tests for the `ensemble_calibration.EnsembleReordering` -class. +Unit tests for the +`ensemble_copula_coupling.EnsembleReordering` """ import unittest @@ -39,8 +39,7 @@ from iris.tests import IrisTest import numpy as np -from improver.ensemble_calibration.ensemble_calibration import ( - EnsembleReordering as Plugin) +from improver.ensemble_copula_coupling import EnsembleReordering as Plugin from improver.tests.helper_functions_ensemble_calibration import( set_up_temperature_cube, add_forecast_reference_time_and_forecast_period) diff --git a/lib/improver/tests/test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py similarity index 99% rename from lib/improver/tests/test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py rename to lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index ca054da0c1..ede112561f 100644 --- a/lib/improver/tests/test_ensemble_calibration_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -30,8 +30,7 @@ # POSSIBILITY OF SUCH DAMAGE. """ Unit tests for the -`ensemble_calibration.GeneratePercentilesFromMeanAndVariance` -class. +`ensemble_copula_coupling.GeneratePercentilesFromMeanAndVariance` """ import unittest @@ -42,7 +41,7 @@ from iris.tests import IrisTest import numpy as np -from improver.ensemble_calibration.ensemble_calibration import ( +from improver.ensemble_copula_coupling import ( GeneratePercentilesFromMeanAndVariance as Plugin) from improver.tests.helper_functions_ensemble_calibration import( set_up_temperature_cube, add_forecast_reference_time_and_forecast_period) From b442c8c856fcb97df1e44742789f57261c6766c0 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 3 May 2017 17:21:15 +0100 Subject: [PATCH 0086/1367] Refactoring of the Ensemble Copula Coupling code into ensemble_copula_coupling.py. Addition of a GeneratePercentilesFromProbabilities plugin, which linearly interpolates probability values at each threshold to create an Empirical Cumulative Distribution Function, in order to generate percentiles. The number of number of percentiles required can be specified as an input argument, otherwise the number of raw ensemble members will be used. 
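A single-point sketch of the empirical-CDF construction this commit message describes: invert the probabilities-above-threshold, pad with fixed bounds, then linearly interpolate at the requested percentiles. The bound values here are illustrative stand-ins for the bounds_for_ecdf constants:

    import numpy as np

    thresholds = np.array([273.15, 275.15, 277.15])
    prob_above = np.array([0.9, 0.5, 0.1])  # P(phenomenon > threshold)

    # Invert to probabilities-below and pad so the ECDF spans [0, 1].
    lower_bound, upper_bound = 263.15, 283.15  # illustrative bounds
    cdf_probs = np.concatenate(([0.], 1 - prob_above, [1.]))
    cdf_values = np.concatenate(([lower_bound], thresholds, [upper_bound]))

    # Linear interpolation gives the value at each requested percentile.
    percentiles = np.array([0.25, 0.5, 0.75])
    print(np.interp(percentiles, cdf_probs, cdf_values))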
The EnsembleReordering plugin has been extended to have an option for randomly ordering the output ensemble, rather than use the ordering of the raw ensemble. Capability has also been added for if the number of percentiles and the number of ensemble members differ. --- lib/improver/ensemble_copula_coupling.py | 457 +++++++++++++----- .../ensemble_copula_coupling_constants.py | 5 + ...oupling_EnsembleCopulaCouplingUtilities.py | 169 +++++++ ...mble_copula_coupling_EnsembleReordering.py | 188 ++++++- ..._GeneratePercentilesFromMeanAndVariance.py | 132 ----- ...ng_GeneratePercentilesFromProbabilities.py | 293 +++++++++++ 6 files changed, 996 insertions(+), 248 deletions(-) create mode 100644 lib/improver/ensemble_copula_coupling_constants.py create mode 100644 lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py create mode 100644 lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py diff --git a/lib/improver/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling.py index cc07fb6125..ecbd23b29b 100644 --- a/lib/improver/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling.py @@ -41,38 +41,69 @@ import iris from ensemble_calibration_utilities import ( - concatenate_cubes, rename_coordinate) + concatenate_cubes, convert_cube_data_to_2d, rename_coordinate) +from ensemble_copula_coupling_constants import bounds_for_ecdf -class GeneratePercentilesFromMeanAndVariance(object): +class EnsembleCopulaCouplingUtilities(object): """ - Plugin focussing on generating percentiles from mean and variance. - In combination with the EnsembleReordering plugin, this is Ensemble - Copula Coupling. + Class containing utilities used to enable Ensemble Copula Coupling. """ - - def __init__(self, calibrated_forecast_predictor_and_variance, - raw_forecast): + @staticmethod + def create_percentiles( + no_of_percentiles, sampling="quantile"): """ - Initialise the class. + Function to create percentiles. Parameters ---------- - calibrated_forecast_predictor_and_variance : Iris CubeList - CubeList containing the calibrated forecast predictor and - calibrated forecast variance. - raw_forecast : Iris Cube or CubeList - Cube or CubeList that is expected to be the raw - (uncalibrated) forecast. + no_of_percentiles : Int + Number of percentiles. + sampling : String + Type of sampling of the distribution to produce a set of + percentiles e.g. quantile or random. + Accepted options for sampling are: + Quantile: A regular set of equally-spaced percentiles aimed + at dividing a Cumulative Distribution Function into + blocks of equal probability. + Random: A random set of ordered percentiles. + + For further details, Flowerdew, J., 2014. + Calibrating ensemble reliability whilst preserving spatial structure. + Tellus, Series A: Dynamic Meteorology and Oceanography, 66(1), pp.1-20. + Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. + Uncertainty Quantification in Complex Simulation Models Using Ensemble + Copula Coupling. + Statistical Science, 28(4), pp.616-640. + + Returns + ------- + percentiles : List + Percentiles calculated using the sampling technique specified. 
""" - (self.calibrated_forecast_predictor, - self.calibrated_forecast_variance) = ( - calibrated_forecast_predictor_and_variance) - self.raw_forecast = raw_forecast + if sampling in ["quantile"]: + percentiles = np.linspace( + 1/float(1+no_of_percentiles), + no_of_percentiles/float(1+no_of_percentiles), + no_of_percentiles).tolist() + elif sampling in ["random"]: + percentiles = [] + for _ in range(no_of_percentiles): + percentiles.append( + random.uniform( + 1/float(1+no_of_percentiles), + no_of_percentiles/float(1+no_of_percentiles))) + percentiles = sorted(percentiles) + else: + msg = "The {} sampling option is not yet implemented.".format( + sampling) + raise ValueError(msg) + return percentiles - def _create_cube_with_percentiles( - self, percentiles, template_cube, cube_data): + @staticmethod + def create_cube_with_percentiles( + percentiles, template_cube, cube_data): """ Create a cube with a percentile coordinate based on a template cube. @@ -119,6 +150,177 @@ def _create_cube_with_percentiles( cube.cell_methods = template_cube.cell_methods return cube + +class GeneratePercentilesFromProbabilities(object): + """ + Class for generating percentiles from probabilities. + In combination with the Ensemble Reordering plugin, this is Ensemble + Copula Coupling. + """ + + def __init__(self): + """ + Initialise the class. + """ + pass + + def _add_bounds_to_thresholds_and_probabilities( + self, threshold_points, probabilities_for_cdf, bounds_pairing): + """ + Padding of the lower and upper bounds for a given phenomenon for the + threshold_points, and padding of probabilities of 0 and 1 to the + forecast probabilities. + + Parameters + ---------- + threshold_points : Numpy array + Array of threshold values used to calculate the probabilities. + probabilities_for_cdf : Numpy array + Array containing the probabilities used for constructing an + empirical cumulative distribution function i.e. probabilities + below threshold. + + Returns + ------- + threshold_points : Numpy array + Array of threshold values padded with the lower and upper bound. + probabilities_for_cdf : Numpy array + Array containing the probabilities padded with 0 and 1 at each end. + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function. + + """ + lower_bound, upper_bound = bounds_pairing + threshold_points = np.insert(threshold_points, 0, lower_bound) + threshold_points = np.append(threshold_points, upper_bound) + zeroes_array = np.zeros((probabilities_for_cdf.shape[0], 1)) + ones_array = np.ones((probabilities_for_cdf.shape[0], 1)) + probabilities_for_cdf = np.concatenate( + (zeroes_array, probabilities_for_cdf, ones_array), axis=1) + return threshold_points, probabilities_for_cdf + + def _probabilities_to_percentiles( + self, forecast_probabilities, percentiles, bounds_pairing): + """ + Conversion of probabilities to percentiles through the construction + of an empirical cumulative distribution function. This is effectively + constructed by linear interpolation from the probabilities associated + with each threshold to a set of percentiles. + + Parameters + ---------- + forecast_probabilities : Iris cube + Cube with a probability_above_threshold coordinate. + percentiles : Numpy array + Array of percentiles, at which the corresponding values will be + calculated. + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function. 
+ + Returns + ------- + percentile_cube : Iris cube + Cube with probabilities at the required percentiles. + + """ + threshold_points = ( + forecast_probabilities.coord("probability_above_threshold").points) + + prob_slices = convert_cube_data_to_2d( + forecast_probabilities, coord="probability_above_threshold") + + # Invert probabilities + probabilities_for_cdf = 1 - prob_slices + + threshold_points, probabilities_for_cdf = ( + self._add_bounds_to_thresholds_and_probabilities( + threshold_points, probabilities_for_cdf, bounds_pairing)) + + forecast_at_percentiles = ( + np.empty((probabilities_for_cdf.shape[0], len(percentiles)))) + for index in range(probabilities_for_cdf.shape[0]): + forecast_at_percentiles[index, :] = np.interp( + percentiles, probabilities_for_cdf[index, :], + threshold_points) + + t_coord = forecast_probabilities.coord("time") + y_coord = forecast_probabilities.coord(axis="y") + x_coord = forecast_probabilities.coord(axis="x") + + forecast_at_percentiles = forecast_at_percentiles.reshape( + len(percentiles), len(t_coord.points), len(y_coord.points), + len(x_coord.points)) + percentile_cube = ( + EnsembleCopulaCouplingUtilities.create_cube_with_percentiles( + percentiles, forecast_probabilities, forecast_at_percentiles)) + percentile_cube.cell_methods = {} + return percentile_cube + + def process(self, forecast_probabilities, no_of_percentiles=None, + sampling="quantile"): + """ + 1. Concatenates cubes with a probability_above_threshold coordinate. + 2. Creates a list of percentiles. + 3. Accesses the lower and upper bound pair to find the ends of the + empirical cumulative distribution function. + 4. Convert the probability_above_threshold coordinate into + values at a set of percentiles. + + Parameters + ---------- + forecast_probabilities : Iris CubeList or Iris Cube + Cube or CubeList expected to contain a probability_above_threshold + coordinate. + no_of_percentiles : Integer + Number of percentiles + sampling : String + Type of sampling of the distribution to produce a set of + percentiles e.g. quantile or random. + Accepted options for sampling are: + Quantile: A regular set of equally-spaced percentiles aimed + at dividing a Cumulative Distribution Function into + blocks of equal probability. + Random: A random set of ordered percentiles. + + Returns + ------- + forecast_at_percentiles : Iris cube + Cube with forecast values at the desired set of percentiles. + + """ + forecast_probabilities = concatenate_cubes(forecast_probabilities) + + if no_of_percentiles is None: + no_of_percentiles = ( + len(forecast_probabilities.coord( + "probability_above_threshold").points)) + + percentiles = EnsembleCopulaCouplingUtilities.create_percentiles( + no_of_percentiles, sampling=sampling) + + # Extract bounds from dictionary of constants. + bounds_pairing = bounds_for_ecdf[forecast_probabilities.name()] + + forecast_at_percentiles = self._probabilities_to_percentiles( + forecast_probabilities, percentiles, bounds_pairing) + return forecast_at_percentiles + + +class GeneratePercentilesFromMeanAndVariance(object): + """ + Plugin focussing on generating percentiles from mean and variance. + In combination with the EnsembleReordering plugin, this is Ensemble + Copula Coupling. + """ + + def __init__(self): + """ + Initialise the class. 
+ """ + pass + def _mean_and_variance_to_percentiles( self, calibrated_forecast_predictor, calibrated_forecast_variance, percentiles): @@ -190,66 +392,26 @@ def _mean_and_variance_to_percentiles( result = result.reshape( len(percentiles), len(t_coord.points), len(y_coord.points), len(x_coord.points)) - percentile_cube = self._create_cube_with_percentiles( - percentiles, calibrated_forecast_predictor, result) + percentile_cube = ( + EnsembleCopulaCouplingUtilities.create_cube_with_percentiles( + percentiles, calibrated_forecast_predictor, result)) percentile_cube.cell_methods = {} return percentile_cube - def _create_percentiles( - self, no_of_percentiles, sampling="quantile"): + def process(self, calibrated_forecast_predictor_and_variance, + raw_forecast): """ - Function to create percentiles. + Generate ensemble percentiles from the mean and variance. Parameters ---------- - no_of_percentiles : Int - Number of percentiles. - sampling : String - Type of sampling of the distribution to produce a set of - percentiles e.g. quantile or random. - Accepted options for sampling are: - Quantile: A regular set of equally-spaced percentiles aimed - at dividing a Cumulative Distribution Function into - blocks of equal probability. - Random: A random set of ordered percentiles. - - For further details, Flowerdew, J., 2014. - Calibrating ensemble reliability whilst preserving spatial structure. - Tellus, Series A: Dynamic Meteorology and Oceanography, 66(1), pp.1-20. - Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. - Uncertainty Quantification in Complex Simulation Models Using Ensemble - Copula Coupling. - Statistical Science, 28(4), pp.616-640. - - Returns - ------- - percentiles : List - Percentiles calculated using the sampling technique specified. - - """ - if sampling in ["quantile"]: - percentiles = np.linspace( - 1/float(1+no_of_percentiles), - no_of_percentiles/float(1+no_of_percentiles), - no_of_percentiles).tolist() - elif sampling in ["random"]: - percentiles = [] - for _ in range(no_of_percentiles): - percentiles.append( - random.uniform( - 1/float(1+no_of_percentiles), - no_of_percentiles/float(1+no_of_percentiles))) - percentiles = sorted(percentiles) - else: - msg = "The {} sampling option is not yet implemented.".format( - sampling) - raise ValueError(msg) - return percentiles - - def process(self): - """ - Generate ensemble percentiles from the mean and variance. + calibrated_forecast_predictor_and_variance : Iris CubeList + CubeList containing the calibrated forecast predictor and + calibrated forecast variance. + raw_forecast : Iris Cube or CubeList + Cube or CubeList that is expected to be the raw + (uncalibrated) forecast. Returns ------- @@ -257,20 +419,22 @@ def process(self): Cube for calibrated percentiles. 
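# The statistical core of _mean_and_variance_to_percentiles is not
# shown in this hunk; a sketch of the conventional calculation,
# assuming the calibrated forecast follows a normal distribution (the
# usual choice for EMOS-calibrated temperature) and that scipy is
# available. The numbers are illustrative.
import numpy as np
from scipy.stats import norm

calibrated_forecast_predictor = np.array([280.0, 281.5])  # mean per point
calibrated_forecast_variance = np.array([1.0, 4.0])
percentiles = [0.25, 0.5, 0.75]

result = np.empty((len(percentiles), len(calibrated_forecast_predictor)))
for index, percentile in enumerate(percentiles):
    # Percent point function (inverse CDF) of N(mean, sqrt(variance)).
    result[index, :] = norm.ppf(
        percentile, loc=calibrated_forecast_predictor,
        scale=np.sqrt(calibrated_forecast_variance))
print(result[1])  # the 0.5 percentile recovers the mean: [ 280.   281.5]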
""" - raw_forecast = self.raw_forecast + (calibrated_forecast_predictor, calibrated_forecast_variance) = ( + calibrated_forecast_predictor_and_variance) calibrated_forecast_predictor = concatenate_cubes( - self.calibrated_forecast_predictor) + calibrated_forecast_predictor) calibrated_forecast_variance = concatenate_cubes( - self.calibrated_forecast_variance) + calibrated_forecast_variance) rename_coordinate( - self.raw_forecast, "ensemble_member_id", "realization") - raw_forecast_members = concatenate_cubes(self.raw_forecast) + raw_forecast, "ensemble_member_id", "realization") + raw_forecast_members = concatenate_cubes(raw_forecast) no_of_percentiles = len( raw_forecast_members.coord("realization").points) - percentiles = self._create_percentiles(no_of_percentiles) + percentiles = EnsembleCopulaCouplingUtilities.create_percentiles( + no_of_percentiles) calibrated_forecast_percentiles = ( self._mean_and_variance_to_percentiles( calibrated_forecast_predictor, @@ -293,37 +457,79 @@ class EnsembleReordering(object): Statistical Science, 28(4), pp.616-640. """ - def __init__(self, calibrated_forecast, raw_forecast): + def __init__(self): + """Initialise the class""" + pass + + def mismatch_between_length_of_raw_members_and_percentiles( + self, post_processed_forecast_percentiles, raw_forecast_members): """ + Function to determine whether there is a mismatch between the number + of percentiles and the number of raw forecast members. If more + percentiles are requested than ensemble members, then the ensemble + members are recycled. If fewer percentiles are requested than + ensemble members, then only the first n ensemble members are used. + Parameters ---------- - calibrated_forecast : Iris Cube or CubeList - The cube or cubelist containing the calibrated forecast members. - raw_forecast : Iris Cube or CubeList - The cube or cubelist containing the raw (uncalibrated) forecast. + post_processed_forecast_percentiles : cube + Cube for post-processed percentiles. The percentiles are assumed + to be in ascending order. + raw_forecast_members : cube + Cube containing the raw (not post-processed) forecasts. - """ - self.calibrated_forecast = calibrated_forecast - self.raw_forecast = raw_forecast + Returns + ------- + Iris cube + Cube for post-processed members where at a particular grid point, + the ranking of the values within the ensemble matches the ranking + from the raw ensemble. - def rank_ecc(self, calibrated_forecast_percentiles, raw_forecast_members): """ - Function to apply Ensemble Copula Coupling. This ranks the calibrated - forecast members based on a ranking determined from the raw forecast - members. 
+ plen = len( + post_processed_forecast_percentiles.coord("percentile").points) + mlen = len(raw_forecast_members.coord("realization").points) + if plen == mlen: + pass + elif plen > mlen or plen < mlen: + raw_forecast_members_extended = iris.cube.CubeList() + realization_list = [] + mpoints = raw_forecast_members.coord("realization").points + for index in range(plen): + realization_list.append(mpoints[index % len(mpoints)]) + for realization, index in zip(realization_list, range(plen)): + constr = iris.Constraint(realization=realization) + raw_forecast_member = raw_forecast_members.extract(constr) + raw_forecast_member.coord("realization").points = index + raw_forecast_members_extended.append(raw_forecast_member) + raw_forecast_members = ( + concatenate_cubes(raw_forecast_members_extended)) + return post_processed_forecast_percentiles, raw_forecast_members + + def rank_ecc( + self, post_processed_forecast_percentiles, raw_forecast_members, + random_ordering=False): + """ + Function to apply Ensemble Copula Coupling. This ranks the + post-processed forecast members based on a ranking determined from + the raw forecast members. Parameters ---------- - calibrated_forecast_percentiles : cube - Cube for calibrated percentiles. The percentiles are assumed to be - in ascending order. + post_processed_forecast_percentiles : cube + Cube for post-processed percentiles. The percentiles are assumed + to be in ascending order. raw_forecast_members : cube - Cube containing the raw (uncalibrated) forecasts. + Cube containing the raw (not post-processed) forecasts. + random_ordering : Logical + If random_ordering is True, the post-processed forecasts are + reordered randomly, rather than using the ordering of the + raw ensemble. Returns ------- Iris cube - Cube for calibrated members where at a particular grid point, + Cube for post-processed members where at a particular grid point, the ranking of the values within the ensemble matches the ranking from the raw ensemble. @@ -331,37 +537,64 @@ def rank_ecc(self, calibrated_forecast_percentiles, raw_forecast_members): results = iris.cube.CubeList([]) for rawfc, calfc in zip( raw_forecast_members.slices_over("time"), - calibrated_forecast_percentiles.slices_over("time")): + post_processed_forecast_percentiles.slices_over("time")): random_data = np.random.random(rawfc.data.shape) - # Lexsort returns the indices sorted firstly by the primary key, - # the raw forecast data, and secondly by the secondary key, an - # array of random data, in order to split tied values randomly. - sorting_index = np.lexsort((random_data, rawfc.data), axis=0) + # Lexsort returns the indices sorted firstly by the + # primary key, the raw forecast data (unless random_ordering + # is enabled), and secondly by the secondary key, an array of + # random data, in order to split tied values randomly. + if random_ordering: + fake_rawfc_data = np.random.random(rawfc.data.shape) + sorting_index = ( + np.lexsort((random_data, fake_rawfc_data), axis=0)) + else: + sorting_index = np.lexsort((random_data, rawfc.data), axis=0) # Returns the indices that would sort the array. ranking = np.argsort(sorting_index, axis=0) - # Index the calibrated forecast data using the ranking array. + # Index the post-processed forecast data using the ranking array. 
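# The recycling rule above, reduced to plain indexing: with more
# percentiles than raw members the members repeat cyclically, and with
# fewer only the leading members survive. Illustrative values:
mpoints = [0, 1, 2]  # realization numbers of the raw ensemble members
print([mpoints[index % len(mpoints)] for index in range(5)])  # [0, 1, 2, 0, 1]
print([mpoints[index % len(mpoints)] for index in range(2)])  # [0, 1]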
# np.choose allows indexing of a 3d array using a 3d array, calfc.data = np.choose(ranking, calfc.data) results.append(calfc) return concatenate_cubes(results) - def process(self): + def process( + self, post_processed_forecast, raw_forecast, + random_ordering=False): """ + Reorder post-processed forecast using the ordering of the + raw ensemble. + + Parameters + ---------- + post_processed_forecast : Iris Cube or CubeList + The cube or cubelist containing the post-processed + forecast members. + raw_forecast : Iris Cube or CubeList + The cube or cubelist containing the raw (not post-processed) + forecast. + random_ordering : Logical + If random_ordering is True, the post-processed forecasts are + reordered randomly, rather than using the ordering of the + raw ensemble. + Returns ------- - calibrated_forecast_members : cube + post-processed_forecast_members : cube Cube for a new ensemble member where all points within the dataset are representative of a specified probability threshold across the whole domain. """ rename_coordinate( - self.raw_forecast, "ensemble_member_id", "realization") - calibrated_forecast_percentiles = concatenate_cubes( - self.calibrated_forecast, + raw_forecast, "ensemble_member_id", "realization") + post_processed_forecast_percentiles = concatenate_cubes( + post_processed_forecast, coords_to_slice_over=["percentile", "time"]) - raw_forecast_members = concatenate_cubes(self.raw_forecast) - calibrated_forecast_members = self.rank_ecc( - calibrated_forecast_percentiles, raw_forecast_members) + raw_forecast_members = concatenate_cubes(raw_forecast) + post_processed_forecast_percentiles, raw_forecast_members = ( + self.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members)) + post_processed_forecast_members = self.rank_ecc( + post_processed_forecast_percentiles, raw_forecast_members) rename_coordinate( - calibrated_forecast_members, "percentile", "realization") - return calibrated_forecast_members + post_processed_forecast_members, "percentile", "realization") + return post_processed_forecast_members diff --git a/lib/improver/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling_constants.py new file mode 100644 index 0000000000..5357f1a810 --- /dev/null +++ b/lib/improver/ensemble_copula_coupling_constants.py @@ -0,0 +1,5 @@ +"""Module to contain constants used for Ensemble Copula Coupling.""" + +# Specify the bounds for each phenomenon for creating the empirical +# cumulative distribution function. +bounds_for_ecdf = {"air_temperature": (-40, 50)} \ No newline at end of file diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py new file mode 100644 index 0000000000..e96f0a6c8a --- /dev/null +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -0,0 +1,169 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. 
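# The core of rank_ecc for one time slice, on plain arrays. The random
# secondary key splits ties in the raw data; the post-processed
# percentiles (ascending along axis 0) are then indexed so their ranks
# match the raw ensemble's ranks. Illustrative values:
import numpy as np

rawfc_data = np.array([[5.0], [1.0], [3.0]])    # raw members, one column
calfc_data = np.array([[1.1], [2.9], [5.2]])    # ascending percentiles

random_data = np.random.random(rawfc_data.shape)
sorting_index = np.lexsort((random_data, rawfc_data), axis=0)
ranking = np.argsort(sorting_index, axis=0)
print(np.choose(ranking, calfc_data))  # [[ 5.2] [ 1.1] [ 2.9]]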
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""
+Unit tests for the
+`ensemble_copula_coupling.EnsembleCopulaCouplingUtilities` class.
+"""
+import unittest
+
+import iris
+from iris.coords import DimCoord
+from iris.cube import Cube, CubeList
+from iris.tests import IrisTest
+import numpy as np
+
+from improver.ensemble_copula_coupling import (
+    EnsembleCopulaCouplingUtilities as Plugin)
+from improver.tests.helper_functions_ensemble_calibration import(
+    set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period)
+
+
+class Test_create_cube_with_percentiles(IrisTest):
+
+    """Test the create_cube_with_percentiles plugin."""
+
+    def setUp(self):
+        """Set up temperature cube."""
+        self.current_temperature_forecast_cube = (
+            _add_forecast_reference_time_and_forecast_period(
+                set_up_temperature_cube()))
+
+    def test_basic(self):
+        """Test that the plugin returns an Iris.cube.Cube."""
+        cube = self.current_temperature_forecast_cube
+        cube_data = cube.data + 2
+        percentiles = [0.1, 0.5, 0.9]
+        result = Plugin.create_cube_with_percentiles(
+            percentiles, cube, cube_data)
+        self.assertIsInstance(result, Cube)
+
+    def test_many_percentiles(self):
+        """
+        Test that the plugin returns an Iris.cube.Cube with many
+        percentiles.
+        """
+        cube = self.current_temperature_forecast_cube
+        percentiles = np.linspace(0, 1, 100)
+        cube_data = np.zeros(
+            [len(percentiles), len(cube.coord("time").points),
+             len(cube.coord("latitude").points),
+             len(cube.coord("longitude").points)])
+        result = Plugin.create_cube_with_percentiles(
+            percentiles, cube, cube_data)
+        self.assertEqual(cube_data.shape, result.data.shape)
+
+    def test_incompatible_percentiles(self):
+        """
+        Test that the plugin fails if the percentile values requested
+        are not numbers.
+        """
+        cube = self.current_temperature_forecast_cube
+        percentiles = ["cat", "dog", "elephant"]
+        cube_data = np.zeros(
+            [len(percentiles), len(cube.coord("time").points),
+             len(cube.coord("latitude").points),
+             len(cube.coord("longitude").points)])
+        msg = "could not convert string to float"
+        with self.assertRaisesRegexp(ValueError, msg):
+            Plugin.create_cube_with_percentiles(
+                percentiles, cube, cube_data)
+
+    def test_percentile_points(self):
+        """
+        Test that the plugin returns an Iris.cube.Cube
+        with a percentile coordinate with the desired points.
+ """ + cube = self.current_temperature_forecast_cube + cube_data = cube.data + 2 + percentiles = [0.1, 0.5, 0.9] + result = Plugin.create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertIsInstance(result.coord("percentile"), DimCoord) + self.assertArrayAlmostEqual( + result.coord("percentile").points, percentiles) + + +class Test_create_percentiles(IrisTest): + + """Test the create_percentiles plugin.""" + + def setUp(self): + """Set up temperature cube.""" + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + + def test_basic(self): + """ + Test that the plugin returns a list with the expected number of + percentiles. + """ + cube = self.current_temperature_forecast_cube + no_of_percentiles = 3 + result = Plugin.create_percentiles(no_of_percentiles) + self.assertIsInstance(result, list) + self.assertEqual(len(result), no_of_percentiles) + + def test_data(self): + """ + Test that the plugin returns a list with the expected data values + for the percentiles. + """ + data = np.array([0.25, 0.5, 0.75]) + + cube = self.current_temperature_forecast_cube + no_of_percentiles = 3 + result = Plugin.create_percentiles(no_of_percentiles) + self.assertArrayAlmostEqual(result, data) + + def test_random(self): + """ + Test that the plugin returns a list with the expected number of + percentiles, if the random sampling option is selected. + """ + cube = self.current_temperature_forecast_cube + no_of_percentiles = 3 + result = Plugin.create_percentiles( + no_of_percentiles, sampling="random") + self.assertIsInstance(result, list) + self.assertEqual(len(result), no_of_percentiles) + + def test_unknown_sampling_option(self): + """ + Test that the plugin returns the expected error message, + if an unknown sampling option is selected. + """ + cube = self.current_temperature_forecast_cube + no_of_percentiles = 3 + msg = "The unknown sampling option is not yet implemented" + with self.assertRaisesRegexp(ValueError, msg): + Plugin.create_percentiles( + no_of_percentiles, sampling="unknown") diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 76971516f8..3fd0b19dcb 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -30,7 +30,7 @@ # POSSIBILITY OF SUCH DAMAGE. """ Unit tests for the -`ensemble_copula_coupling.EnsembleReordering` +`ensemble_copula_coupling.EnsembleReordering` plugin. """ import unittest @@ -41,8 +41,96 @@ from improver.ensemble_copula_coupling import EnsembleReordering as Plugin from improver.tests.helper_functions_ensemble_calibration import( - set_up_temperature_cube, - add_forecast_reference_time_and_forecast_period) + set_up_cube, set_up_temperature_cube, + _add_forecast_reference_time_and_forecast_period) + + +class Test_mismatch_between_length_of_raw_members_and_percentiles(IrisTest): + + """ + Test the mismatch_between_length_of_raw_members_and_percentiles + method in the EnsembleReordering plugin. + """ + + def setUp(self): + """ + Create a cube with forecast_reference_time and + forecast_period coordinates. 
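# The quantile spacing checked by test_data places n percentiles at
# i/(n + 1) for i = 1..n, dividing the CDF into equal blocks:
import numpy as np

no_of_percentiles = 3
percentiles = np.linspace(
    1/float(1+no_of_percentiles),
    no_of_percentiles/float(1+no_of_percentiles),
    no_of_percentiles).tolist()
print(percentiles)  # [0.25, 0.5, 0.75]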
+ """ + data = np.tile(np.linspace(5, 10, 9), 3).reshape(3, 1, 3, 3) + data[0] -= 1 + data[1] += 1 + data[2] += 3 + cube = set_up_cube(data, "air_temperature", "degreesC") + self.realization_cube = ( + _add_forecast_reference_time_and_forecast_period(cube.copy())) + cube.coord("realization").rename("percentile") + self.percentile_cube = ( + _add_forecast_reference_time_and_forecast_period(cube)) + + def test_types_length_of_percentiles_equals_length_of_members(self): + post_processed_forecast_percentiles = self.percentile_cube + raw_forecast_members = self.realization_cube + plugin = Plugin() + result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members) + self.assertIsInstance(result, tuple) + for aresult in result: + self.assertIsInstance(aresult, Cube) + + def test_types_length_of_percentiles_greater_than_length_of_members(self): + post_processed_forecast_percentiles = self.percentile_cube + raw_forecast_members = self.realization_cube + raw_forecast_members = raw_forecast_members[:2, :, :, :] + plugin = Plugin() + result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members) + for aresult in result: + self.assertIsInstance(aresult, Cube) + + def test_types_length_of_percentiles_less_than_length_of_members(self): + post_processed_forecast_percentiles = self.percentile_cube + raw_forecast_members = self.realization_cube + post_processed_forecast_percentiles = ( + post_processed_forecast_percentiles[:2, :, :, :]) + plugin = Plugin() + result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members) + for aresult in result: + self.assertIsInstance(aresult, Cube) + + def test_realization_for_equal(self): + data = [0, 1, 2] + post_processed_forecast_percentiles = self.percentile_cube + raw_forecast_members = self.realization_cube + plugin = Plugin() + result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members) + self.assertArrayAlmostEqual( + data, result[1].coord("realization").points) + + def test_realization_for_greater_than(self): + data = [0, 1, 2] + post_processed_forecast_percentiles = self.percentile_cube + raw_forecast_members = self.realization_cube + raw_forecast_members = raw_forecast_members[:2, :, :, :] + plugin = Plugin() + result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members) + self.assertArrayAlmostEqual( + data, result[1].coord("realization").points) + + def test_realization_for_less_than(self): + data = [0, 1] + post_processed_forecast_percentiles = self.percentile_cube + raw_forecast_members = self.realization_cube + post_processed_forecast_percentiles = ( + post_processed_forecast_percentiles[:2, :, :, :]) + plugin = Plugin() + result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members) + self.assertArrayAlmostEqual( + data, result[1].coord("realization").points) class Test_rank_ecc(IrisTest): @@ -290,6 +378,97 @@ def test_2d_cube(self): result.transpose([1, 0]) self.assertArrayAlmostEqual(result.data, result_data) + def test_2d_cube_random_ordering(self): + """ + Test that the plugin returns the correct cube data for a + 2d input cube, if random ordering is selected. 
+ """ + raw_data = np.array([[3], + [2], + [1]]) + + calibrated_data = np.array([[1], + [2], + [3]]) + + result_data_first = np.array([[1], + [2], + [3]]) + + result_data_second = np.array([[1], + [3], + [2]]) + + result_data_third = np.array([[2], + [1], + [3]]) + + result_data_fourth = np.array([[2], + [3], + [1]]) + + result_data_fifth = np.array([[3], + [1], + [2]]) + + result_data_sixth = np.array([[3], + [2], + [1]]) + + cube = self.cube.copy() + cube = cube[:, :, 0, 0] + raw_cube = cube.copy() + raw_cube.data = raw_data + calibrated_cube = cube.copy() + calibrated_cube.data = calibrated_data + + plugin = Plugin() + result = plugin.rank_ecc(calibrated_cube, raw_cube, + random_ordering=True) + result.transpose([1, 0]) + + err_count = 0 + try: + self.assertArrayAlmostEqual(result.data, result_data_first) + except Exception as err1: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_second) + except Exception as err2: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_third) + except Exception as err3: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_fourth) + except Exception as err4: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_fifth) + except Exception as err5: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_sixth) + except Exception as err6: + err_count += 1 + + if err_count == 6: + raise ValueError("Exceptions raised as all accepted forms of the " + "calibrated data were not matched." + "1. {}" + "2. {}" + "3. {}" + "4. {}" + "5. {}" + "6. {}".format(err1, err2, err3, + err4, err5, err6)) + class Test_process(IrisTest): @@ -306,11 +485,12 @@ def setUp(self): self.calibrated_cube = ( add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) + self.calibrated_cube.coord("realization").rename("percentile") def test_basic(self): """Test that the plugin returns an iris.cube.Cube.""" plugin = Plugin() - result = plugin.process(self.raw_cube, self.calibrated_cube) + result = plugin.process(self.calibrated_cube, self.raw_cube) self.assertIsInstance(result, Cube) self.assertTrue(result.coords("realization")) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index ede112561f..25366de39b 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -47,75 +47,6 @@ set_up_temperature_cube, add_forecast_reference_time_and_forecast_period) -class Test__create_cube_with_percentiles(IrisTest): - - """Test the _create_cube_with_percentiles plugin.""" - - def setUp(self): - """Set up temperature cube.""" - self.current_temperature_forecast_cube = ( - add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) - - def test_basic(self): - """Test that the plugin returns an Iris.cube.Cube.""" - cube = self.current_temperature_forecast_cube - cube_data = cube.data + 2 - percentiles = [0.1, 0.5, 0.9] - plugin = Plugin() - result = plugin._create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertIsInstance(result, Cube) - - def test_many_percentiles(self): - """ - Test that the plugin returns an Iris.cube.Cube with many - percentiles. 
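# The six accepted orderings in the random-ordering test above are
# exactly the permutations of the calibrated column; a more compact way
# to express the same membership check (a sketch, not the test as
# committed):
import itertools

import numpy as np

result_data = np.array([[2], [3], [1]])  # stand-in for result.data
accepted = [np.array(perm).reshape(3, 1)
            for perm in itertools.permutations([1, 2, 3])]
assert any(np.array_equal(result_data, perm) for perm in accepted)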
- """ - cube = self.current_temperature_forecast_cube - percentiles = np.linspace(0, 1, 100) - cube_data = np.zeros( - [len(percentiles), len(cube.coord("time").points), - len(cube.coord("latitude").points), - len(cube.coord("longitude").points)]) - plugin = Plugin() - result = plugin._create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertEqual(cube_data.shape, result.data.shape) - - def test_incompatible_percentiles(self): - """ - Test that the plugin fails if the percentile values requested - are not numbers. - """ - cube = self.current_temperature_forecast_cube - percentiles = ["cat", "dog", "elephant"] - cube_data = np.zeros( - [len(percentiles), len(cube.coord("time").points), - len(cube.coord("latitude").points), - len(cube.coord("longitude").points)]) - plugin = Plugin() - msg = "could not convert string to float" - with self.assertRaisesRegexp(ValueError, msg): - plugin._create_cube_with_percentiles( - percentiles, cube, cube_data) - - def test_percentile_points(self): - """ - Test that the plugin returns an Iris.cube.Cube - with a percentile coordinate with the desired points. - """ - cube = self.current_temperature_forecast_cube - cube_data = cube.data + 2 - percentiles = [0.1, 0.5, 0.9] - plugin = Plugin() - result = plugin._create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertIsInstance(result.coord("percentile"), DimCoord) - self.assertArrayAlmostEqual( - result.coord("percentile").points, percentiles) - - class Test__mean_and_variance_to_percentiles(IrisTest): """Test the _mean_and_variance_to_percentiles plugin.""" @@ -320,69 +251,6 @@ def test_negative_percentiles(self): percentiles) -class Test_create_percentiles(IrisTest): - - """Test the create_percentiles plugin.""" - - def setUp(self): - """Set up temperature cube.""" - self.current_temperature_forecast_cube = ( - add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) - - def test_basic(self): - """ - Test that the plugin returns a list with the expected number of - percentiles. - """ - cube = self.current_temperature_forecast_cube - no_of_percentiles = 3 - plugin = Plugin() - result = plugin._create_percentiles(no_of_percentiles) - self.assertIsInstance(result, list) - self.assertEqual(len(result), no_of_percentiles) - - def test_data(self): - """ - Test that the plugin returns a list with the expected data values - for the percentiles. - """ - data = np.array([0.25, 0.5, 0.75]) - - cube = self.current_temperature_forecast_cube - no_of_percentiles = 3 - plugin = Plugin() - result = plugin._create_percentiles(no_of_percentiles) - self.assertArrayAlmostEqual(result, data) - - def test_random(self): - """ - Test that the plugin returns a list with the expected number of - percentiles, if the random sampling option is selected. - """ - cube = self.current_temperature_forecast_cube - no_of_percentiles = 3 - plugin = Plugin() - result = plugin._create_percentiles( - no_of_percentiles, sampling="random") - self.assertIsInstance(result, list) - self.assertEqual(len(result), no_of_percentiles) - - def test_unknown_sampling_option(self): - """ - Test that the plugin returns the expected error message, - if an unknown sampling option is selected. 
- """ - cube = self.current_temperature_forecast_cube - no_of_percentiles = 3 - plugin = Plugin() - - msg = "The unknown sampling option is not yet implemented" - with self.assertRaisesRegexp(ValueError, msg): - plugin._create_percentiles( - no_of_percentiles, sampling="unknown") - - class Test_process(IrisTest): """Test the process plugin.""" diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py new file mode 100644 index 0000000000..68f2c7bcaf --- /dev/null +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -0,0 +1,293 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +""" +Unit tests for the +`plugins_ensemble_copula_coupling.GeneratePercentilesFromProbabilities` +class. 
+ +""" +import numpy as np +import unittest + +from cf_units import Unit +import iris +from iris.coords import DimCoord +from iris.cube import Cube, CubeList +from iris.tests import IrisTest + +from improver.ensemble_copula_coupling import ( + GeneratePercentilesFromProbabilities as Plugin) +from improver.tests.helper_functions_ensemble_calibration import( + _add_forecast_reference_time_and_forecast_period) + + +def set_up_cube(data, phenomenon_standard_name, phenomenon_units, + forecast_thresholds=[8, 10, 12], + y_dimension_length=3, x_dimension_length=3): + """Create a cube containing multiple realizations.""" + cube = Cube(data, standard_name=phenomenon_standard_name, + units=phenomenon_units) + cube.add_dim_coord( + DimCoord(forecast_thresholds, + long_name='probability_above_threshold', units='degreesC'), 0) + time_origin = "hours since 1970-01-01 00:00:00" + calendar = "gregorian" + tunit = Unit(time_origin, calendar) + cube.add_dim_coord(DimCoord([402192.5], + "time", units=tunit), 1) + cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, y_dimension_length), + 'latitude', units='degrees'), 2) + cube.add_dim_coord(DimCoord(np.linspace(120, 180, x_dimension_length), + 'longitude', units='degrees'), 3) + return cube + + +def set_up_temperature_cube(): + """Create a cube with metadata and values suitable for air temperature.""" + data = np.array([[[[1.0, 0.9, 1.0], + [0.8, 0.9, 0.5], + [0.5, 0.2, 0.0]]], + [[[1.0, 0.5, 1.0], + [0.5, 0.5, 0.3], + [0.2, 0.0, 0.0]]], + [[[1.0, 0.2, 0.5], + [0.2, 0.0, 0.1], + [0.0, 0.0, 0.0]]]]) + return set_up_cube(data, "air_temperature", "1") + + +class Test__probabilities_to_percentiles(IrisTest): + + """Test the _create_cube_with_percentiles plugin.""" + + def setUp(self): + """Set up temperature cube.""" + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + + def test_basic(self): + """Test that the plugin returns an Iris.cube.Cube.""" + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertIsInstance(result, Cube) + + def test_check_data(self): + """Test that the plugin returns an Iris.cube.Cube.""" + data = np.array([[[[15.8, 31., 46.2], + [8., 10., 31.], + [10.4, 12., 42.4]]], + [[[-16., 10, 31.], + [8., 10., 11.6], + [-30.4, 8., 12.]]], + [[[-30.4, 8., 11.], + [-34., -10., 9], + [-35.2, -16., 3.2]]]]) + + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + def test_check_single_threshold(self): + """Test that the plugin returns an Iris.cube.Cube.""" + data = np.array([[[[12.2, 29., 45.8], + [8., 26.66666667, 45.33333333], + [12.2, 29., 45.8]]], + [[[-16., 23.75, 44.75], + [8., 26.66666667, 45.33333333], + [-30.4, 8., 41.6]]], + [[[-30.4, 8., 41.6], + [-34., -10., 29.], + [-35.2, -16., 3.2]]]]) + + for acube in self.current_temperature_forecast_cube.slices_over( + "probability_above_threshold"): + cube = acube + break + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + def test_lots_of_probability_thresholds(self): + """Test that 
the plugin returns an Iris.cube.Cube.""" + input_probs_1d = np.linspace(1, 0, 30) + input_probs = np.tile(input_probs_1d, (3, 3, 1, 1)).T + + data = np.array([[[[2.9, 14.5, 26.1], + [2.9, 14.5, 26.1], + [2.9, 14.5, 26.1]]], + [[[2.9, 14.5, 26.1], + [2.9, 14.5, 26.1], + [2.9, 14.5, 26.1]]], + [[[2.9, 14.5, 26.1], + [2.9, 14.5, 26.1], + [2.9, 14.5, 26.1]]]]) + + temperature_values = np.arange(0, 30) + cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube(input_probs, "air_temperature", "1", + forecast_thresholds=temperature_values))) + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + def test_lots_of_percentiles(self): + """Test that the plugin returns an Iris.cube.Cube.""" + data = np.array([[[[ 13.9, 15.8, 17.7], + [19.6, 21.5, 23.4], + [25.3, 27.2, 29.1]]], + [[[31., 32.9, 34.8], + [36.7, 38.6, 40.5], + [42.4, 44.3, 46.2]]], + [[[48.1, -16., 8.], + [8.25, 8.5, 8.75], + [9., 9.25, 9.5]]], + [[[9.75, 10., 10.33333333], + [10.66666667, 11., 11.33333333], + [11.66666667, 12., 21.5]]], + [[[31., 40.5, 10.2], + [10.4, 10.6, 10.8], + [11., 11.2, 11.4]]], + [[[11.6, 11.8, 12.], + [15.8, 19.6, 23.4], + [27.2, 31., 34.8]]], + [[[38.6, 42.4, 46.2], + [-28., -16., -4.], + [8., 8.33333333, 8.66666667]]], + [[[9., 9.33333333, 9.66666667], + [10., 10.33333333, 10.66666667], + [11., 11.33333333, 11.66666667]]], + [[[12., 21.5, 31.], + [40.5, -16., 8.], + [8.25, 8.5, 8.75]]], + [[[9., 9.25, 9.5], + [9.75, 10., 10.2], + [10.4, 10.6, 10.8]]], + [[[11., 11.2, 11.4], + [11.6, 11.8, -35.2], + [-30.4, -25.6, -20.8]]], + [[[-16., -11.2, -6.4], + [-1.6, 3.2, 8.], + [8.5, 9., 9.5]]], + [[[10., 10.5, 11.], + [11.5, 12., 31.], + [-35.2, -30.4, -25.6]]], + [[[-20.8, -16., -11.2], + [-6.4, -1.6, 3.2], + [8., 8.33333333, 8.66666667]]], + [[[9., 9.33333333, 9.66666667], + [10., 10.5, 11.], + [11.5, -37., -34.]]], + [[[-31., -28., -25.], + [-22., -19., -16.], + [-13., -10., -7.]]], + [[[-4., -1., 2.], + [5., 8., 8.5], + [9., 9.5, -37.6]]], + [[[-35.2, -32.8, -30.4], + [-28., -25.6, -23.2], + [-20.8, -18.4, -16.]]], + [[[-13.6, -11.2, -8.8], + [-6.4, -4., -1.6], + [0.8, 3.2, 5.6]]]]) + cube = self.current_temperature_forecast_cube + percentiles = np.arange(0.05, 1.0, 0.05) + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + +class Test_process(IrisTest): + + """Test the _create_cube_with_percentiles plugin.""" + + def setUp(self): + """Set up temperature cube.""" + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + + def test_check_data_specifying_percentiles(self): + """Test that the plugin returns an Iris.cube.Cube.""" + data = np.array([[[[21.5, 31., 40.5], + [8.75, 10., 11.66666667], + [11., 12., 31.]]], + [[[8.33333333, 10., 11.66666667], + [8.75, 10., 11.], + [-16., 8., 10.5]]], + [[[-16., 8., 9.66666667], + [-25., -10., 5.], + [-28., -16., -4.]]]]) + + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + plugin = Plugin() + result = plugin.process( + cube, no_of_percentiles=len(percentiles)) + self.assertArrayAlmostEqual(result.data, data) + + def test_check_data_not_specifying_percentiles(self): + """Test that the plugin returns an Iris.cube.Cube.""" + data = np.array([[[[21.5, 31., 40.5], + 
[8.75, 10., 11.66666667], + [11., 12., 31.]]], + [[[8.33333333, 10., 11.66666667], + [8.75, 10., 11.], + [-16., 8., 10.5]]], + [[[-16., 8., 9.66666667], + [-25., -10., 5.], + [-28., -16., -4.]]]]) + + cube = self.current_temperature_forecast_cube + plugin = Plugin() + result = plugin.process(cube) + self.assertArrayAlmostEqual(result.data, data) + + +if __name__ == '__main__': + unittest.main() From e976a1ea5d07958832b4554b6eac48d5f4df017b Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 8 May 2017 09:19:52 +0100 Subject: [PATCH 0087/1367] Minor pep8 and pylint changes. --- lib/improver/ensemble_copula_coupling.py | 2 +- lib/improver/ensemble_copula_coupling_constants.py | 2 +- ...le_copula_coupling_GeneratePercentilesFromProbabilities.py | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling.py index ecbd23b29b..40b9c526b8 100644 --- a/lib/improver/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling.py @@ -40,7 +40,7 @@ import cf_units as unit import iris -from ensemble_calibration_utilities import ( +from ensemble_calibration.ensemble_calibration_utilities import ( concatenate_cubes, convert_cube_data_to_2d, rename_coordinate) from ensemble_copula_coupling_constants import bounds_for_ecdf diff --git a/lib/improver/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling_constants.py index 5357f1a810..62d3cb6995 100644 --- a/lib/improver/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling_constants.py @@ -2,4 +2,4 @@ # Specify the bounds for each phenomenon for creating the empirical # cumulative distribution function. -bounds_for_ecdf = {"air_temperature": (-40, 50)} \ No newline at end of file +bounds_for_ecdf = {"air_temperature": (-40, 50)} diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 68f2c7bcaf..0aa28503a1 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -166,7 +166,7 @@ def test_lots_of_probability_thresholds(self): cube = ( _add_forecast_reference_time_and_forecast_period( set_up_cube(input_probs, "air_temperature", "1", - forecast_thresholds=temperature_values))) + forecast_thresholds=temperature_values))) percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() @@ -176,7 +176,7 @@ def test_lots_of_probability_thresholds(self): def test_lots_of_percentiles(self): """Test that the plugin returns an Iris.cube.Cube.""" - data = np.array([[[[ 13.9, 15.8, 17.7], + data = np.array([[[[13.9, 15.8, 17.7], [19.6, 21.5, 23.4], [25.3, 27.2, 29.1]]], [[[31., 32.9, 34.8], From 4a61c219c88ea4f2c690cd86835853afabe62a80 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 8 May 2017 09:26:57 +0100 Subject: [PATCH 0088/1367] Single pep8 change. 
--- ...nsemble_copula_coupling_EnsembleReordering.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 3fd0b19dcb..91bbb702aa 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -460,14 +460,14 @@ def test_2d_cube_random_ordering(self): if err_count == 6: raise ValueError("Exceptions raised as all accepted forms of the " - "calibrated data were not matched." - "1. {}" - "2. {}" - "3. {}" - "4. {}" - "5. {}" - "6. {}".format(err1, err2, err3, - err4, err5, err6)) + "calibrated data were not matched." + "1. {}" + "2. {}" + "3. {}" + "4. {}" + "5. {}" + "6. {}".format(err1, err2, err3, + err4, err5, err6)) class Test_process(IrisTest): From a9f0b80ca96818b46ea9a4877c878f8f2c1f5cc1 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 8 May 2017 15:05:52 +0100 Subject: [PATCH 0089/1367] Addition of functionality to handle spot forecasts, if the cube containing the spot forecasts has a locnum coordinate, as an index for each of the spot forecasts. Refactoring of Ensemble Copula Coupling into a module given that this processing is relatively independent. The utilities have been separated out into ensemble_copula_coupling_utilities.py. --- .../ensemble_copula_coupling/__init__.py | 0 .../ensemble_copula_coupling.py | 168 ++++-------------- .../ensemble_copula_coupling_constants.py | 0 .../ensemble_copula_coupling_utilities.py | 148 +++++++++++++++ .../helper_functions_ensemble_calibration.py | 32 +++- ...oupling_EnsembleCopulaCouplingUtilities.py | 46 +++-- ...mble_copula_coupling_EnsembleReordering.py | 3 +- ..._GeneratePercentilesFromMeanAndVariance.py | 40 ++++- ...ng_GeneratePercentilesFromProbabilities.py | 63 ++++++- 9 files changed, 349 insertions(+), 151 deletions(-) create mode 100644 lib/improver/ensemble_copula_coupling/__init__.py rename lib/improver/{ => ensemble_copula_coupling}/ensemble_copula_coupling.py (78%) rename lib/improver/{ => ensemble_copula_coupling}/ensemble_copula_coupling_constants.py (100%) create mode 100644 lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py diff --git a/lib/improver/ensemble_copula_coupling/__init__.py b/lib/improver/ensemble_copula_coupling/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/lib/improver/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py similarity index 78% rename from lib/improver/ensemble_copula_coupling.py rename to lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 40b9c526b8..80cb328624 100644 --- a/lib/improver/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -40,115 +40,12 @@ import cf_units as unit import iris -from ensemble_calibration.ensemble_calibration_utilities import ( +from improver.ensemble_calibration.ensemble_calibration_utilities import ( concatenate_cubes, convert_cube_data_to_2d, rename_coordinate) -from ensemble_copula_coupling_constants import bounds_for_ecdf - - -class EnsembleCopulaCouplingUtilities(object): - """ - Class containing utilities used to enable Ensemble Copula Coupling. - """ - @staticmethod - def create_percentiles( - no_of_percentiles, sampling="quantile"): - """ - Function to create percentiles. 
- - Parameters - ---------- - no_of_percentiles : Int - Number of percentiles. - sampling : String - Type of sampling of the distribution to produce a set of - percentiles e.g. quantile or random. - Accepted options for sampling are: - Quantile: A regular set of equally-spaced percentiles aimed - at dividing a Cumulative Distribution Function into - blocks of equal probability. - Random: A random set of ordered percentiles. - - For further details, Flowerdew, J., 2014. - Calibrating ensemble reliability whilst preserving spatial structure. - Tellus, Series A: Dynamic Meteorology and Oceanography, 66(1), pp.1-20. - Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. - Uncertainty Quantification in Complex Simulation Models Using Ensemble - Copula Coupling. - Statistical Science, 28(4), pp.616-640. - - Returns - ------- - percentiles : List - Percentiles calculated using the sampling technique specified. - - """ - if sampling in ["quantile"]: - percentiles = np.linspace( - 1/float(1+no_of_percentiles), - no_of_percentiles/float(1+no_of_percentiles), - no_of_percentiles).tolist() - elif sampling in ["random"]: - percentiles = [] - for _ in range(no_of_percentiles): - percentiles.append( - random.uniform( - 1/float(1+no_of_percentiles), - no_of_percentiles/float(1+no_of_percentiles))) - percentiles = sorted(percentiles) - else: - msg = "The {} sampling option is not yet implemented.".format( - sampling) - raise ValueError(msg) - return percentiles - - @staticmethod - def create_cube_with_percentiles( - percentiles, template_cube, cube_data): - """ - Create a cube with a percentile coordinate based on a template cube. - - Parameters - ---------- - percentiles : List - Ensemble percentiles. - template_cube : Iris cube - Cube to copy majority of coordinate definitions from. - cube_data : Numpy array - Data to insert into the template cube. - The data is expected to have the shape of - percentiles (0th dimension), time (1st dimension), - y_coord (2nd dimension), x_coord (3rd dimension). - - Returns - ------- - String - Coordinate name of the matched coordinate. 
- - """ - percentile_coord = iris.coords.DimCoord( - np.float32(percentiles), long_name="percentile", - units=unit.Unit("1"), var_name="percentile") - - time_coord = template_cube.coord("time") - y_coord = template_cube.coord(axis="y") - x_coord = template_cube.coord(axis="x") - - dim_coords_and_dims = [ - (percentile_coord, 0), (time_coord, 1), - (y_coord, 2), (x_coord, 3)] - - frt_coord = template_cube.coord("forecast_reference_time") - fp_coord = template_cube.coord("forecast_period") - aux_coords_and_dims = [(frt_coord, 1), (fp_coord, 1)] - - metadata_dict = copy.deepcopy(template_cube.metadata._asdict()) - - cube = iris.cube.Cube( - cube_data, dim_coords_and_dims=dim_coords_and_dims, - aux_coords_and_dims=aux_coords_and_dims, **metadata_dict) - cube.attributes = template_cube.attributes - cube.cell_methods = template_cube.cell_methods - return cube +from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ + import bounds_for_ecdf +from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ + import create_cube_with_percentiles, create_percentiles class GeneratePercentilesFromProbabilities(object): @@ -245,16 +142,22 @@ def _probabilities_to_percentiles( percentiles, probabilities_for_cdf[index, :], threshold_points) - t_coord = forecast_probabilities.coord("time") - y_coord = forecast_probabilities.coord(axis="y") - x_coord = forecast_probabilities.coord(axis="x") - - forecast_at_percentiles = forecast_at_percentiles.reshape( - len(percentiles), len(t_coord.points), len(y_coord.points), - len(x_coord.points)) - percentile_cube = ( - EnsembleCopulaCouplingUtilities.create_cube_with_percentiles( - percentiles, forecast_probabilities, forecast_at_percentiles)) + if forecast_probabilities.coords("locnum"): + t_coord = forecast_probabilities.coord("time") + locnum_coord = forecast_probabilities.coord("locnum") + forecast_at_percentiles = forecast_at_percentiles.reshape( + len(percentiles), len(t_coord.points), + len(locnum_coord.points)) + else: + t_coord = forecast_probabilities.coord("time") + y_coord = forecast_probabilities.coord(axis="y") + x_coord = forecast_probabilities.coord(axis="x") + forecast_at_percentiles = forecast_at_percentiles.reshape( + len(percentiles), len(t_coord.points), len(y_coord.points), + len(x_coord.points)) + + percentile_cube = create_cube_with_percentiles( + percentiles, forecast_probabilities, forecast_at_percentiles) percentile_cube.cell_methods = {} return percentile_cube @@ -297,7 +200,7 @@ def process(self, forecast_probabilities, no_of_percentiles=None, len(forecast_probabilities.coord( "probability_above_threshold").points)) - percentiles = EnsembleCopulaCouplingUtilities.create_percentiles( + percentiles = create_percentiles( no_of_percentiles, sampling=sampling) # Extract bounds from dictionary of constants. 
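# The dictionary lookup referenced by the comment above is keyed on the
# cube's name, i.e. bounds_for_ecdf[forecast_probabilities.name()].
# Only air_temperature is registered so far, and a missing phenomenon
# raises KeyError, so new diagnostics need an entry in bounds_for_ecdf:
from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \
    import bounds_for_ecdf

print(bounds_for_ecdf["air_temperature"])  # (-40, 50)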
@@ -385,16 +288,22 @@ def _mean_and_variance_to_percentiles( result = result.T - t_coord = calibrated_forecast_predictor.coord("time") - y_coord = calibrated_forecast_predictor.coord(axis="y") - x_coord = calibrated_forecast_predictor.coord(axis="x") + if calibrated_forecast_predictor.coords("locnum"): + t_coord = calibrated_forecast_predictor.coord("time") + locnum_coord = calibrated_forecast_predictor.coord("locnum") + result = result.reshape( + len(percentiles), len(t_coord.points), + len(locnum_coord.points)) + else: + t_coord = calibrated_forecast_predictor.coord("time") + y_coord = calibrated_forecast_predictor.coord(axis="y") + x_coord = calibrated_forecast_predictor.coord(axis="x") + result = result.reshape( + len(percentiles), len(t_coord.points), len(y_coord.points), + len(x_coord.points)) - result = result.reshape( - len(percentiles), len(t_coord.points), len(y_coord.points), - len(x_coord.points)) - percentile_cube = ( - EnsembleCopulaCouplingUtilities.create_cube_with_percentiles( - percentiles, calibrated_forecast_predictor, result)) + percentile_cube = create_cube_with_percentiles( + percentiles, calibrated_forecast_predictor, result) percentile_cube.cell_methods = {} return percentile_cube @@ -433,8 +342,7 @@ def process(self, calibrated_forecast_predictor_and_variance, no_of_percentiles = len( raw_forecast_members.coord("realization").points) - percentiles = EnsembleCopulaCouplingUtilities.create_percentiles( - no_of_percentiles) + percentiles = create_percentiles(no_of_percentiles) calibrated_forecast_percentiles = ( self._mean_and_variance_to_percentiles( calibrated_forecast_predictor, diff --git a/lib/improver/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py similarity index 100% rename from lib/improver/ensemble_copula_coupling_constants.py rename to lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py new file mode 100644 index 0000000000..7a78f3d63b --- /dev/null +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -0,0 +1,148 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +""" +This module defines the utilities required for Ensemble Copula Coupling +plugins. + +""" +import copy +import numpy as np +import random + +import cf_units as unit +import iris + + +def create_percentiles( + no_of_percentiles, sampling="quantile"): + """ + Function to create percentiles. + + Parameters + ---------- + no_of_percentiles : Int + Number of percentiles. + sampling : String + Type of sampling of the distribution to produce a set of + percentiles e.g. quantile or random. + Accepted options for sampling are: + Quantile: A regular set of equally-spaced percentiles aimed + at dividing a Cumulative Distribution Function into + blocks of equal probability. + Random: A random set of ordered percentiles. + + For further details, Flowerdew, J., 2014. + Calibrating ensemble reliability whilst preserving spatial structure. + Tellus, Series A: Dynamic Meteorology and Oceanography, 66(1), pp.1-20. + Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. + Uncertainty Quantification in Complex Simulation Models Using Ensemble + Copula Coupling. + Statistical Science, 28(4), pp.616-640. + + Returns + ------- + percentiles : List + Percentiles calculated using the sampling technique specified. + + """ + if sampling in ["quantile"]: + percentiles = np.linspace( + 1/float(1+no_of_percentiles), + no_of_percentiles/float(1+no_of_percentiles), + no_of_percentiles).tolist() + elif sampling in ["random"]: + percentiles = [] + for _ in range(no_of_percentiles): + percentiles.append( + random.uniform( + 1/float(1+no_of_percentiles), + no_of_percentiles/float(1+no_of_percentiles))) + percentiles = sorted(percentiles) + else: + msg = "The {} sampling option is not yet implemented.".format( + sampling) + raise ValueError(msg) + return percentiles + + +def create_cube_with_percentiles( + percentiles, template_cube, cube_data): + """ + Create a cube with a percentile coordinate based on a template cube. + + Parameters + ---------- + percentiles : List + Ensemble percentiles. + template_cube : Iris cube + Cube to copy majority of coordinate definitions from. + cube_data : Numpy array + Data to insert into the template cube. + The data is expected to have the shape of + percentiles (0th dimension), time (1st dimension), + y_coord (2nd dimension), x_coord (3rd dimension). + + Returns + ------- + String + Coordinate name of the matched coordinate. 
+ + """ + percentile_coord = iris.coords.DimCoord( + np.float32(percentiles), long_name="percentile", + units=unit.Unit("1"), var_name="percentile") + + if template_cube.coords("locnum"): + time_coord = template_cube.coord("time") + locnum_coord = template_cube.coord("locnum") + dim_coords_and_dims = [ + (percentile_coord, 0), (time_coord, 1), + (locnum_coord, 2)] + else: + time_coord = template_cube.coord("time") + y_coord = template_cube.coord(axis="y") + x_coord = template_cube.coord(axis="x") + dim_coords_and_dims = [ + (percentile_coord, 0), (time_coord, 1), + (y_coord, 2), (x_coord, 3)] + + frt_coord = template_cube.coord("forecast_reference_time") + fp_coord = template_cube.coord("forecast_period") + aux_coords_and_dims = [(frt_coord, 1), (fp_coord, 1)] + + metadata_dict = copy.deepcopy(template_cube.metadata._asdict()) + + cube = iris.cube.Cube( + cube_data, dim_coords_and_dims=dim_coords_and_dims, + aux_coords_and_dims=aux_coords_and_dims, **metadata_dict) + cube.attributes = template_cube.attributes + cube.cell_methods = template_cube.cell_methods + return cube diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 6e11f12787..78eb37e564 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -35,7 +35,7 @@ from cf_units import Unit import iris -from iris.coords import DimCoord +from iris.coords import AuxCoord, DimCoord from iris.cube import Cube, CubeList import numpy as np @@ -71,6 +71,36 @@ def set_up_temperature_cube(): return set_up_cube(data, "air_temperature", "K") +def set_up_spot_cube(data, phenomenon_standard_name, phenomenon_units): + """Create a cube containing multiple realizations.""" + cube = Cube(data, standard_name=phenomenon_standard_name, + units=phenomenon_units) + cube.add_dim_coord(DimCoord([0, 1, 2], 'realization', + units='1'), 0) + time_origin = "hours since 1970-01-01 00:00:00" + calendar = "gregorian" + tunit = Unit(time_origin, calendar) + cube.add_dim_coord(DimCoord([402192.5], + "time", units=tunit), 1) + cube.add_dim_coord(DimCoord(np.arange(9), long_name='locnum', + units="1"), 2) + cube.add_aux_coord(AuxCoord(np.linspace(-45.0, 45.0, 9), 'latitude', + units='degrees'), data_dims=2) + cube.add_aux_coord(AuxCoord(np.linspace(120, 180, 9), 'longitude', + units='degrees'), data_dims=2) + return cube + + +def set_up_spot_temperature_cube(): + """Create a cube with metadata and values suitable for air temperature.""" + data = (np.tile(np.linspace(-45.0, 45.0, 9), 3).reshape(3, 1, 9) + + 273.15) + data[0] -= 2 + data[1] += 2 + data[2] += 4 + return set_up_spot_cube(data, "air_temperature", "K") + + def set_up_wind_speed_cube(): """Create a cube with metadata and values suitable for wind speed.""" data = np.tile(np.linspace(0, 60, 9), 3).reshape(3, 1, 3, 3) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index e96f0a6c8a..22a2497cba 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -40,10 +40,11 @@ from iris.tests import IrisTest import numpy as np -from improver.ensemble_copula_coupling import ( - EnsembleCopulaCouplingUtilities as Plugin) -from improver.tests.helper_functions_ensemble_calibration import( - 
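# The locnum dispatch above, reduced to its essentials: spot cubes
# carry a single locnum index coordinate in place of separate y/x grid
# coordinates, so percentile data reshapes to three dimensions rather
# than four. Illustrative sizes matching set_up_spot_temperature_cube:
import numpy as np

percentiles = [0.25, 0.5, 0.75]
n_times, n_locnums, n_y, n_x = 1, 9, 3, 3
flat = np.arange(len(percentiles) * n_times * n_y * n_x, dtype=float)

has_locnum = True  # i.e. template_cube.coords("locnum") is non-empty
if has_locnum:
    data = flat.reshape(len(percentiles), n_times, n_locnums)
else:
    data = flat.reshape(len(percentiles), n_times, n_y, n_x)
print(data.shape)  # (3, 1, 9) for spot data, (3, 1, 3, 3) for gridded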
set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period) +from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ + import create_percentiles, create_cube_with_percentiles +from improver.tests.helper_functions_ensemble_calibration import ( + set_up_spot_temperature_cube, set_up_temperature_cube, + _add_forecast_reference_time_and_forecast_period) class Test_create_cube_with_percentiles(IrisTest): @@ -55,13 +56,16 @@ def setUp(self): self.current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) + self.current_temperature_spot_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) def test_basic(self): """Test that the plugin returns an Iris.cube.Cube.""" cube = self.current_temperature_forecast_cube cube_data = cube.data + 2 percentiles = [0.1, 0.5, 0.9] - result = Plugin.create_cube_with_percentiles( + result = create_cube_with_percentiles( percentiles, cube, cube_data) self.assertIsInstance(result, Cube) @@ -76,7 +80,7 @@ def test_many_percentiles(self): [len(percentiles), len(cube.coord("time").points), len(cube.coord("latitude").points), len(cube.coord("longitude").points)]) - result = Plugin.create_cube_with_percentiles( + result = create_cube_with_percentiles( percentiles, cube, cube_data) self.assertEqual(cube_data.shape, result.data.shape) @@ -93,8 +97,7 @@ def test_incompatible_percentiles(self): len(cube.coord("longitude").points)]) msg = "could not convert string to float" with self.assertRaisesRegexp(ValueError, msg): - Plugin.create_cube_with_percentiles( - percentiles, cube, cube_data) + create_cube_with_percentiles(percentiles, cube, cube_data) def test_percentile_points(self): """ @@ -104,8 +107,19 @@ def test_percentile_points(self): cube = self.current_temperature_forecast_cube cube_data = cube.data + 2 percentiles = [0.1, 0.5, 0.9] - result = Plugin.create_cube_with_percentiles( + result = create_cube_with_percentiles(percentiles, cube, cube_data) + self.assertIsInstance(result.coord("percentile"), DimCoord) + self.assertArrayAlmostEqual( + result.coord("percentile").points, percentiles) + + def test_spot_forecasts_percentile_points(self): + """Test that the plugin returns an Iris.cube.Cube.""" + cube = self.current_temperature_spot_forecast_cube + cube_data = cube.data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( percentiles, cube, cube_data) + self.assertIsInstance(result, Cube) self.assertIsInstance(result.coord("percentile"), DimCoord) self.assertArrayAlmostEqual( result.coord("percentile").points, percentiles) @@ -128,7 +142,7 @@ def test_basic(self): """ cube = self.current_temperature_forecast_cube no_of_percentiles = 3 - result = Plugin.create_percentiles(no_of_percentiles) + result = create_percentiles(no_of_percentiles) self.assertIsInstance(result, list) self.assertEqual(len(result), no_of_percentiles) @@ -141,7 +155,7 @@ def test_data(self): cube = self.current_temperature_forecast_cube no_of_percentiles = 3 - result = Plugin.create_percentiles(no_of_percentiles) + result = create_percentiles(no_of_percentiles) self.assertArrayAlmostEqual(result, data) def test_random(self): @@ -151,8 +165,7 @@ def test_random(self): """ cube = self.current_temperature_forecast_cube no_of_percentiles = 3 - result = Plugin.create_percentiles( - no_of_percentiles, sampling="random") + result = create_percentiles(no_of_percentiles, sampling="random") self.assertIsInstance(result, list) 
self.assertEqual(len(result), no_of_percentiles) @@ -165,5 +178,8 @@ def test_unknown_sampling_option(self): no_of_percentiles = 3 msg = "The unknown sampling option is not yet implemented" with self.assertRaisesRegexp(ValueError, msg): - Plugin.create_percentiles( - no_of_percentiles, sampling="unknown") + create_percentiles(no_of_percentiles, sampling="unknown") + + +if __name__ == '__main__': + unittest.main() diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 91bbb702aa..6c4d176855 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -39,7 +39,8 @@ from iris.tests import IrisTest import numpy as np -from improver.ensemble_copula_coupling import EnsembleReordering as Plugin +from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( + EnsembleReordering as Plugin) from improver.tests.helper_functions_ensemble_calibration import( set_up_cube, set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index 25366de39b..79de89d400 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -41,10 +41,11 @@ from iris.tests import IrisTest import numpy as np -from improver.ensemble_copula_coupling import ( +from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( GeneratePercentilesFromMeanAndVariance as Plugin) from improver.tests.helper_functions_ensemble_calibration import( - set_up_temperature_cube, add_forecast_reference_time_and_forecast_period) + set_up_spot_temperature_cube, set_up_temperature_cube, + _add_forecast_reference_time_and_forecast_period) class Test__mean_and_variance_to_percentiles(IrisTest): @@ -56,6 +57,9 @@ def setUp(self): self.current_temperature_forecast_cube = ( add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) + self.current_temperature_spot_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_spot_temperature_cube())) def test_check_data(self): """ @@ -250,6 +254,38 @@ def test_negative_percentiles(self): current_forecast_predictor, current_forecast_variance, percentiles) + def test_spot_forecasts_check_data(self): + """ + Test that the plugin returns an Iris.cube.Cube matching the expected + data values when a cube containing mean and variance is passed in. + The resulting data values are the percentiles, which have been + generated. 
+ """ + data = np.array([[[225.56812863, 236.81812863, 248.06812863, + 259.31812863, 270.56812863, 281.81812863, + 293.06812863, 304.31812863, 315.56812863]], + [[229.48333333, 240.73333333, 251.98333333, + 263.23333333, 274.48333333, 285.73333333, + 296.98333333, 308.23333333, 319.48333333]], + [[233.39853804, 244.64853804, 255.89853804, + 267.14853804, 278.39853804, 289.64853804, + 300.89853804, 312.14853804, 323.39853804]]]) + + cube = self.current_temperature_spot_forecast_cube + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + current_forecast_predictor_and_variance = ( + current_forecast_predictor, current_forecast_variance) + percentiles = [0.1, 0.5, 0.9] + plugin = Plugin() + result = plugin._mean_and_variance_to_percentiles( + current_forecast_predictor, current_forecast_variance, + percentiles) + self.assertIsInstance(result, Cube) + self.assertArrayAlmostEqual(result.data, data) + class Test_process(IrisTest): diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 0aa28503a1..903078a56d 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -39,11 +39,11 @@ from cf_units import Unit import iris -from iris.coords import DimCoord +from iris.coords import AuxCoord, DimCoord from iris.cube import Cube, CubeList from iris.tests import IrisTest -from improver.ensemble_copula_coupling import ( +from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( GeneratePercentilesFromProbabilities as Plugin) from improver.tests.helper_functions_ensemble_calibration import( _add_forecast_reference_time_and_forecast_period) @@ -84,6 +84,43 @@ def set_up_temperature_cube(): return set_up_cube(data, "air_temperature", "1") +def set_up_spot_cube(data, phenomenon_standard_name, phenomenon_units, + forecast_thresholds=[8, 10, 12], + y_dimension_length=9, x_dimension_length=9): + """Create a cube containing multiple realizations.""" + cube = Cube(data, standard_name=phenomenon_standard_name, + units=phenomenon_units) + cube.add_dim_coord( + DimCoord(forecast_thresholds, + long_name='probability_above_threshold', units='degreesC'), 0) + time_origin = "hours since 1970-01-01 00:00:00" + calendar = "gregorian" + tunit = Unit(time_origin, calendar) + cube.add_dim_coord(DimCoord([402192.5], + "time", units=tunit), 1) + cube.add_dim_coord(DimCoord(np.arange(9), long_name='locnum', + units="1"), 2) + cube.add_aux_coord(AuxCoord(np.linspace(-45.0, 45.0, y_dimension_length), + 'latitude', units='degrees'), data_dims=2) + cube.add_aux_coord(AuxCoord(np.linspace(120, 180, x_dimension_length), + 'longitude', units='degrees'), data_dims=2) + return cube + + +def set_up_spot_temperature_cube(): + """Create a cube with metadata and values suitable for air temperature.""" + data = np.array([[[1.0, 0.9, 1.0, + 0.8, 0.9, 0.5, + 0.5, 0.2, 0.0]], + [[1.0, 0.5, 1.0, + 0.5, 0.5, 0.3, + 0.2, 0.0, 0.0]], + [[1.0, 0.2, 0.5, + 0.2, 0.0, 0.1, + 0.0, 0.0, 0.0]]]) + return set_up_spot_cube(data, "air_temperature", "1") + + class Test__probabilities_to_percentiles(IrisTest): """Test the _create_cube_with_percentiles plugin.""" @@ -93,6 +130,9 @@ def setUp(self): self.current_temperature_forecast_cube = ( 
_add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) + self.current_temperature_spot_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_spot_temperature_cube())) def test_basic(self): """Test that the plugin returns an Iris.cube.Cube.""" @@ -241,6 +281,25 @@ def test_lots_of_percentiles(self): cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) + def test_check_data_spot_forecasts(self): + """Test that the plugin returns an Iris.cube.Cube.""" + data = np.array([[[15.8, 31., 46.2, + 8., 10., 31., + 10.4, 12., 42.4]], + [[-16., 10, 31., + 8., 10., 11.6, + -30.4, 8., 12.]], + [[-30.4, 8., 11., + -34., -10., 9, + -35.2, -16., 3.2]]]) + cube = self.current_temperature_spot_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + class Test_process(IrisTest): From ff7fdf433f4b21c64f30dc836187aff8abcac050 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 8 May 2017 16:35:41 +0100 Subject: [PATCH 0090/1367] Code style improvements from using Pylint, addition of missing docstrings from unit tests and addition of extra unit test for testing random ordering within the EnsembleReordering plugin. --- .../ensemble_copula_coupling.py | 17 +- .../ensemble_copula_coupling_utilities.py | 6 +- ...oupling_EnsembleCopulaCouplingUtilities.py | 17 +- ...mble_copula_coupling_EnsembleReordering.py | 171 +++++++++++++++--- ..._GeneratePercentilesFromMeanAndVariance.py | 17 +- ...ng_GeneratePercentilesFromProbabilities.py | 68 +++++-- 6 files changed, 210 insertions(+), 86 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 80cb328624..4f3e134e34 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -32,12 +32,10 @@ This module defines the plugins required for Ensemble Copula Coupling. """ -import copy import numpy as np -import random from scipy.stats import norm -import cf_units as unit + import iris from improver.ensemble_calibration.ensemble_calibration_utilities import ( @@ -389,9 +387,9 @@ def mismatch_between_length_of_raw_members_and_percentiles( Returns ------- Iris cube - Cube for post-processed members where at a particular grid point, - the ranking of the values within the ensemble matches the ranking - from the raw ensemble. + Cube for the raw ensemble forecast, where the raw ensemble members + have either been recycled or constrained, depending upon the + number of percentiles present in the post-processed forecast cube. 
""" plen = len( @@ -412,7 +410,7 @@ def mismatch_between_length_of_raw_members_and_percentiles( raw_forecast_members_extended.append(raw_forecast_member) raw_forecast_members = ( concatenate_cubes(raw_forecast_members_extended)) - return post_processed_forecast_percentiles, raw_forecast_members + return raw_forecast_members def rank_ecc( self, post_processed_forecast_percentiles, raw_forecast_members, @@ -498,11 +496,12 @@ def process( post_processed_forecast, coords_to_slice_over=["percentile", "time"]) raw_forecast_members = concatenate_cubes(raw_forecast) - post_processed_forecast_percentiles, raw_forecast_members = ( + raw_forecast_members = ( self.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members)) post_processed_forecast_members = self.rank_ecc( - post_processed_forecast_percentiles, raw_forecast_members) + post_processed_forecast_percentiles, raw_forecast_members, + random_ordering=random_ordering) rename_coordinate( post_processed_forecast_members, "percentile", "realization") return post_processed_forecast_members diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 7a78f3d63b..1e05e20b20 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -41,8 +41,7 @@ import iris -def create_percentiles( - no_of_percentiles, sampling="quantile"): +def create_percentiles(no_of_percentiles, sampling="quantile"): """ Function to create percentiles. @@ -93,8 +92,7 @@ def create_percentiles( return percentiles -def create_cube_with_percentiles( - percentiles, template_cube, cube_data): +def create_cube_with_percentiles(percentiles, template_cube, cube_data): """ Create a cube with a percentile coordinate based on a template cube. diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 22a2497cba..445c971248 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -34,17 +34,15 @@ """ import unittest -import iris from iris.coords import DimCoord -from iris.cube import Cube, CubeList +from iris.cube import Cube from iris.tests import IrisTest import numpy as np from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ import create_percentiles, create_cube_with_percentiles from improver.tests.helper_functions_ensemble_calibration import ( - set_up_spot_temperature_cube, set_up_temperature_cube, - _add_forecast_reference_time_and_forecast_period) + set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period) class Test_create_cube_with_percentiles(IrisTest): @@ -129,18 +127,11 @@ class Test_create_percentiles(IrisTest): """Test the create_percentiles plugin.""" - def setUp(self): - """Set up temperature cube.""" - self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) - def test_basic(self): """ Test that the plugin returns a list with the expected number of percentiles. 
""" - cube = self.current_temperature_forecast_cube no_of_percentiles = 3 result = create_percentiles(no_of_percentiles) self.assertIsInstance(result, list) @@ -152,8 +143,6 @@ def test_data(self): for the percentiles. """ data = np.array([0.25, 0.5, 0.75]) - - cube = self.current_temperature_forecast_cube no_of_percentiles = 3 result = create_percentiles(no_of_percentiles) self.assertArrayAlmostEqual(result, data) @@ -163,7 +152,6 @@ def test_random(self): Test that the plugin returns a list with the expected number of percentiles, if the random sampling option is selected. """ - cube = self.current_temperature_forecast_cube no_of_percentiles = 3 result = create_percentiles(no_of_percentiles, sampling="random") self.assertIsInstance(result, list) @@ -174,7 +162,6 @@ def test_unknown_sampling_option(self): Test that the plugin returns the expected error message, if an unknown sampling option is selected. """ - cube = self.current_temperature_forecast_cube no_of_percentiles = 3 msg = "The unknown sampling option is not yet implemented" with self.assertRaisesRegexp(ValueError, msg): diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 6c4d176855..a39b4c18c7 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -55,7 +55,8 @@ class Test_mismatch_between_length_of_raw_members_and_percentiles(IrisTest): def setUp(self): """ - Create a cube with forecast_reference_time and + Create a cube with a realization coordinate and a cube with a + percentile coordinate with forecast_reference_time and forecast_period coordinates. """ data = np.tile(np.linspace(5, 10, 9), 3).reshape(3, 1, 3, 3) @@ -70,26 +71,40 @@ def setUp(self): _add_forecast_reference_time_and_forecast_period(cube)) def test_types_length_of_percentiles_equals_length_of_members(self): + """ + Test to check the behaviour whether the number of percentiles equals + the number of members. For when the length of the percentiles equals + the length of the members, check that a Cube is returned. + """ post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube plugin = Plugin() result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) - self.assertIsInstance(result, tuple) - for aresult in result: - self.assertIsInstance(aresult, Cube) + self.assertIsInstance(result, Cube) def test_types_length_of_percentiles_greater_than_length_of_members(self): + """ + Test to check the behaviour whether the number of percentiles is + greater than the number of members. For when the length of the + percentiles is greater than the length of the members, check that a + Cube is returned. + """ post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] plugin = Plugin() result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) - for aresult in result: - self.assertIsInstance(aresult, Cube) + self.assertIsInstance(result, Cube) def test_types_length_of_percentiles_less_than_length_of_members(self): + """ + Test to check the behaviour whether the number of percentiles is + less than the number of members. 
For when the length of the + percentiles is less than the length of the members, check that a + Cube is returned. + """ post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube post_processed_forecast_percentiles = ( @@ -97,10 +112,15 @@ def test_types_length_of_percentiles_less_than_length_of_members(self): plugin = Plugin() result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) - for aresult in result: - self.assertIsInstance(aresult, Cube) + self.assertIsInstance(result, Cube) def test_realization_for_equal(self): + """ + Test to check the behaviour whether the number of percentiles equals + the number of members. For when the length of the percentiles equals + the length of the members, check that the points of the realization + coordinate is as expected. + """ data = [0, 1, 2] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube @@ -108,9 +128,15 @@ def test_realization_for_equal(self): result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual( - data, result[1].coord("realization").points) + data, result.coord("realization").points) def test_realization_for_greater_than(self): + """ + Test to check the behaviour whether the number of percentiles is + greater than the number of members. For when the length of the + percentiles is greater than the length of the members, check that the + points of the realization coordinate is as expected. + """ data = [0, 1, 2] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube @@ -119,9 +145,15 @@ def test_realization_for_greater_than(self): result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual( - data, result[1].coord("realization").points) + data, result.coord("realization").points) def test_realization_for_less_than(self): + """ + Test to check the behaviour whether the number of percentiles is + less than the number of members. For when the length of the + percentiles is less than the length of the members, check that the + points of the realization coordinate is as expected. + """ data = [0, 1] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube @@ -131,7 +163,7 @@ def test_realization_for_less_than(self): result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual( - data, result[1].coord("realization").points) + data, result.coord("realization").points) class Test_rank_ecc(IrisTest): @@ -383,6 +415,9 @@ def test_2d_cube_random_ordering(self): """ Test that the plugin returns the correct cube data for a 2d input cube, if random ordering is selected. + + Random ordering does not use the ordering from the raw ensemble, + and instead just orders the input values randomly. 
""" raw_data = np.array([[3], [2], @@ -397,16 +432,16 @@ def test_2d_cube_random_ordering(self): [3]]) result_data_second = np.array([[1], - [3], - [2]]) + [3], + [2]]) result_data_third = np.array([[2], [1], [3]]) result_data_fourth = np.array([[2], - [3], - [1]]) + [3], + [1]]) result_data_fifth = np.array([[3], [1], @@ -431,32 +466,32 @@ def test_2d_cube_random_ordering(self): err_count = 0 try: self.assertArrayAlmostEqual(result.data, result_data_first) - except Exception as err1: + except AssertionError as err1: err_count += 1 try: self.assertArrayAlmostEqual(result.data, result_data_second) - except Exception as err2: + except AssertionError as err2: err_count += 1 try: self.assertArrayAlmostEqual(result.data, result_data_third) - except Exception as err3: + except AssertionError as err3: err_count += 1 try: self.assertArrayAlmostEqual(result.data, result_data_fourth) - except Exception as err4: + except AssertionError as err4: err_count += 1 try: self.assertArrayAlmostEqual(result.data, result_data_fifth) - except Exception as err5: + except AssertionError as err5: err_count += 1 try: self.assertArrayAlmostEqual(result.data, result_data_sixth) - except Exception as err6: + except AssertionError as err6: err_count += 1 if err_count == 6: @@ -489,12 +524,104 @@ def setUp(self): self.calibrated_cube.coord("realization").rename("percentile") def test_basic(self): - """Test that the plugin returns an iris.cube.Cube.""" + """ + Test that the plugin returns an iris.cube.Cube and the cube has a + realization coordinate. + """ plugin = Plugin() result = plugin.process(self.calibrated_cube, self.raw_cube) self.assertIsInstance(result, Cube) self.assertTrue(result.coords("realization")) + def test_2d_cube_random_ordering(self): + """ + Test that the plugin returns the correct cube data for a + 2d input cube, if random ordering is selected. + """ + raw_data = np.array([[3], + [2], + [1]]) + + calibrated_data = np.array([[1], + [2], + [3]]) + + result_data_first = np.array([[1], + [2], + [3]]) + + result_data_second = np.array([[1], + [3], + [2]]) + + result_data_third = np.array([[2], + [1], + [3]]) + + result_data_fourth = np.array([[2], + [3], + [1]]) + + result_data_fifth = np.array([[3], + [1], + [2]]) + + result_data_sixth = np.array([[3], + [2], + [1]]) + + raw_cube = self.raw_cube[:, :, 0, 0] + raw_cube.data = raw_data + calibrated_cube = self.calibrated_cube[:, :, 0, 0] + calibrated_cube.data = calibrated_data + + plugin = Plugin() + result = plugin.process(calibrated_cube, raw_cube, + random_ordering=True) + result.transpose([1, 0]) + + err_count = 0 + try: + self.assertArrayAlmostEqual(result.data, result_data_first) + except AssertionError as err1: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_second) + except AssertionError as err2: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_third) + except AssertionError as err3: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_fourth) + except AssertionError as err4: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_fifth) + except AssertionError as err5: + err_count += 1 + + try: + self.assertArrayAlmostEqual(result.data, result_data_sixth) + except AssertionError as err6: + err_count += 1 + + if err_count == 6: + raise ValueError("Exceptions raised as all accepted forms of the " + "calibrated data were not matched." + "1. {}" + "2. {}" + "3. {}" + "4. {}" + "5. {}" + "6. 
{}".format(err1, err2, err3, + err4, err5, err6)) + if __name__ == '__main__': unittest.main() diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index 79de89d400..5141cfa58b 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -36,7 +36,6 @@ import unittest import iris -from iris.coords import DimCoord from iris.cube import Cube, CubeList from iris.tests import IrisTest import numpy as np @@ -83,8 +82,6 @@ def test_check_data(self): "realization", iris.analysis.MEAN) current_forecast_variance = cube.collapsed( "realization", iris.analysis.VARIANCE) - current_forecast_predictor_and_variance = ( - current_forecast_predictor, current_forecast_variance) percentiles = [0.1, 0.5, 0.9] plugin = Plugin() result = plugin._mean_and_variance_to_percentiles( @@ -125,8 +122,6 @@ def test_simple_data(self): "realization", iris.analysis.MEAN) current_forecast_variance = cube.collapsed( "realization", iris.analysis.VARIANCE) - current_forecast_predictor_and_variance = ( - current_forecast_predictor, current_forecast_variance) percentiles = [0.1, 0.5, 0.9] plugin = Plugin() result = plugin._mean_and_variance_to_percentiles( @@ -164,8 +159,6 @@ def test_if_identical_data(self): "realization", iris.analysis.MEAN) current_forecast_variance = cube.collapsed( "realization", iris.analysis.VARIANCE) - current_forecast_predictor_and_variance = ( - current_forecast_predictor, current_forecast_variance) percentiles = [0.1, 0.5, 0.9] plugin = Plugin() result = plugin._mean_and_variance_to_percentiles( @@ -207,8 +200,6 @@ def test_if_nearly_identical_data(self): "realization", iris.analysis.MEAN) current_forecast_variance = cube.collapsed( "realization", iris.analysis.VARIANCE) - current_forecast_predictor_and_variance = ( - current_forecast_predictor, current_forecast_variance) percentiles = [0.1, 0.5, 0.9] plugin = Plugin() result = plugin._mean_and_variance_to_percentiles( @@ -226,8 +217,6 @@ def test_many_percentiles(self): "realization", iris.analysis.MEAN) current_forecast_variance = cube.collapsed( "realization", iris.analysis.VARIANCE) - current_forecast_predictor_and_variance = ( - current_forecast_predictor, current_forecast_variance) percentiles = np.linspace(0.01, 0.99, num=1000, endpoint=True) plugin = Plugin() result = plugin._mean_and_variance_to_percentiles( @@ -244,8 +233,6 @@ def test_negative_percentiles(self): "realization", iris.analysis.MEAN) current_forecast_variance = cube.collapsed( "realization", iris.analysis.VARIANCE) - current_forecast_predictor_and_variance = ( - current_forecast_predictor, current_forecast_variance) percentiles = [-0.1, 0.1] plugin = Plugin() msg = "NaNs are present within the result for the" @@ -259,7 +246,7 @@ def test_spot_forecasts_check_data(self): Test that the plugin returns an Iris.cube.Cube matching the expected data values when a cube containing mean and variance is passed in. The resulting data values are the percentiles, which have been - generated. + generated for a spot forecast. 
""" data = np.array([[[225.56812863, 236.81812863, 248.06812863, 259.31812863, 270.56812863, 281.81812863, @@ -276,8 +263,6 @@ def test_spot_forecasts_check_data(self): "realization", iris.analysis.MEAN) current_forecast_variance = cube.collapsed( "realization", iris.analysis.VARIANCE) - current_forecast_predictor_and_variance = ( - current_forecast_predictor, current_forecast_variance) percentiles = [0.1, 0.5, 0.9] plugin = Plugin() result = plugin._mean_and_variance_to_percentiles( diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 903078a56d..9831928e1a 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -38,9 +38,8 @@ import unittest from cf_units import Unit -import iris from iris.coords import AuxCoord, DimCoord -from iris.cube import Cube, CubeList +from iris.cube import Cube from iris.tests import IrisTest from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( @@ -73,21 +72,24 @@ def set_up_cube(data, phenomenon_standard_name, phenomenon_units, def set_up_temperature_cube(): """Create a cube with metadata and values suitable for air temperature.""" data = np.array([[[[1.0, 0.9, 1.0], - [0.8, 0.9, 0.5], - [0.5, 0.2, 0.0]]], - [[[1.0, 0.5, 1.0], - [0.5, 0.5, 0.3], - [0.2, 0.0, 0.0]]], - [[[1.0, 0.2, 0.5], - [0.2, 0.0, 0.1], - [0.0, 0.0, 0.0]]]]) + [0.8, 0.9, 0.5], + [0.5, 0.2, 0.0]]], + [[[1.0, 0.5, 1.0], + [0.5, 0.5, 0.3], + [0.2, 0.0, 0.0]]], + [[[1.0, 0.2, 0.5], + [0.2, 0.0, 0.1], + [0.0, 0.0, 0.0]]]]) return set_up_cube(data, "air_temperature", "1") def set_up_spot_cube(data, phenomenon_standard_name, phenomenon_units, forecast_thresholds=[8, 10, 12], y_dimension_length=9, x_dimension_length=9): - """Create a cube containing multiple realizations.""" + """ + Create a cube containing multiple realizations, where one of the + dimensions is an index used for spot forecasts. + """ cube = Cube(data, standard_name=phenomenon_standard_name, units=phenomenon_units) cube.add_dim_coord( @@ -108,7 +110,10 @@ def set_up_spot_cube(data, phenomenon_standard_name, phenomenon_units, def set_up_spot_temperature_cube(): - """Create a cube with metadata and values suitable for air temperature.""" + """ + Create a cube with metadata and values suitable for air temperature + for spot forecasts. + """ data = np.array([[[1.0, 0.9, 1.0, 0.8, 0.9, 0.5, 0.5, 0.2, 0.0]], @@ -145,7 +150,10 @@ def test_basic(self): self.assertIsInstance(result, Cube) def test_check_data(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles. + """ data = np.array([[[[15.8, 31., 46.2], [8., 10., 31.], [10.4, 12., 42.4]]], @@ -165,7 +173,11 @@ def test_check_data(self): self.assertArrayAlmostEqual(result.data, data) def test_check_single_threshold(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles, if a single threshold is used for + constructing the percentiles. 
+ """ data = np.array([[[[12.2, 29., 45.8], [8., 26.66666667, 45.33333333], [12.2, 29., 45.8]]], @@ -188,7 +200,10 @@ def test_check_single_threshold(self): self.assertArrayAlmostEqual(result.data, data) def test_lots_of_probability_thresholds(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles, if there are lots of thresholds. + """ input_probs_1d = np.linspace(1, 0, 30) input_probs = np.tile(input_probs_1d, (3, 3, 1, 1)).T @@ -215,7 +230,11 @@ def test_lots_of_probability_thresholds(self): self.assertArrayAlmostEqual(result.data, data) def test_lots_of_percentiles(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles, if lots of percentile values are + requested. + """ data = np.array([[[[13.9, 15.8, 17.7], [19.6, 21.5, 23.4], [25.3, 27.2, 29.1]]], @@ -263,7 +282,7 @@ def test_lots_of_percentiles(self): [11.5, -37., -34.]]], [[[-31., -28., -25.], [-22., -19., -16.], - [-13., -10., -7.]]], + [-13., -10., -7.]]], [[[-4., -1., 2.], [5., 8., 8.5], [9., 9.5, -37.6]]], @@ -282,7 +301,10 @@ def test_lots_of_percentiles(self): self.assertArrayAlmostEqual(result.data, data) def test_check_data_spot_forecasts(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles for spot forecasts. + """ data = np.array([[[15.8, 31., 46.2, 8., 10., 31., 10.4, 12., 42.4]], @@ -312,7 +334,10 @@ def setUp(self): set_up_temperature_cube())) def test_check_data_specifying_percentiles(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for a specific number of percentiles. + """ data = np.array([[[[21.5, 31., 40.5], [8.75, 10., 11.66666667], [11., 12., 31.]]], @@ -331,7 +356,10 @@ def test_check_data_specifying_percentiles(self): self.assertArrayAlmostEqual(result.data, data) def test_check_data_not_specifying_percentiles(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values without specifying the number of percentiles. + """ data = np.array([[[[21.5, 31., 40.5], [8.75, 10., 11.66666667], [11., 12., 31.]]], From fa986bf5eaa3eb6c9b14cb3209463216111d1b4e Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 8 May 2017 16:41:57 +0100 Subject: [PATCH 0091/1367] Removed trialling whitespace. --- .../tests/test_ensemble_copula_coupling_EnsembleReordering.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index a39b4c18c7..0cb7b805be 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -118,7 +118,7 @@ def test_realization_for_equal(self): """ Test to check the behaviour whether the number of percentiles equals the number of members. For when the length of the percentiles equals - the length of the members, check that the points of the realization + the length of the members, check that the points of the realization coordinate is as expected. 
""" data = [0, 1, 2] From 0c3644ec38d2e6204cac06b9c16f4f93ef00056a Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 9 May 2017 10:46:35 +0100 Subject: [PATCH 0092/1367] Edits for the constant bounds used for the empirical Cumulative Distribution Function, including the addition of a method for ensuring that the units of the bounds matches the units of the forecast, which is being processed. Missing docstrings have also been added for the _add_bounds_to_thresholds_and_probabilities method. --- .../ensemble_copula_coupling.py | 28 ++++- .../ensemble_copula_coupling_constants.py | 9 +- ...ng_GeneratePercentilesFromProbabilities.py | 116 ++++++++++++++++++ 3 files changed, 149 insertions(+), 4 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 4f3e134e34..886ae3b55e 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -37,11 +37,12 @@ import iris +import cf_units as unit from improver.ensemble_calibration.ensemble_calibration_utilities import ( concatenate_cubes, convert_cube_data_to_2d, rename_coordinate) from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf + import bounds_for_ecdf, units_of_bounds_for_ecdf from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ import create_cube_with_percentiles, create_percentiles @@ -159,6 +160,28 @@ def _probabilities_to_percentiles( percentile_cube.cell_methods = {} return percentile_cube + def _convert_bounds_units(self, forecast_probabilities): + """ + Convert the units of the bounds_pairing to the units of the + forecast. + """ + fp_units = ( + forecast_probabilities.coord("probability_above_threshold").units) + # Extract bounds from dictionary of constants. + try: + bounds_pairing = bounds_for_ecdf[forecast_probabilities.name()] + bounds_pairing_units = ( + units_of_bounds_for_ecdf[forecast_probabilities.name()]) + except KeyError as err: + msg = ("The forecast_probabilities name: {} is not recognised" + "within bounds_for_ecdf / units_of_bounds_for_ecdf: {}.\n" + "Error: {}") + raise KeyError(msg) + bounds_pairing_units = unit.Unit(bounds_pairing_units) + bounds_pairing = bounds_pairing_units.convert( + np.array(bounds_pairing), fp_units) + return bounds_pairing + def process(self, forecast_probabilities, no_of_percentiles=None, sampling="quantile"): """ @@ -201,8 +224,7 @@ def process(self, forecast_probabilities, no_of_percentiles=None, percentiles = create_percentiles( no_of_percentiles, sampling=sampling) - # Extract bounds from dictionary of constants. - bounds_pairing = bounds_for_ecdf[forecast_probabilities.name()] + bounds_pairing = self._convert_bounds_units(forecast_probabilities) forecast_at_percentiles = self._probabilities_to_percentiles( forecast_probabilities, percentiles, bounds_pairing) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py index 62d3cb6995..11a1acc6e1 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py @@ -2,4 +2,11 @@ # Specify the bounds for each phenomenon for creating the empirical # cumulative distribution function. 
-bounds_for_ecdf = {"air_temperature": (-40, 50)} +bounds_for_ecdf = {"air_temperature": (-40, 50), + "wind_speed": (0, 50), + "air_pressure_at_sea_level": (940, 1070)} + +# Specify the units for the bounds for each phenomenon +units_of_bounds_for_ecdf = {"air_temperature": "degreesC", + "wind_speed": "m s^-1", + "air_pressure_at_sea_level": "hPa"} \ No newline at end of file diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 9831928e1a..5d93ff6e1f 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -44,6 +44,8 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( GeneratePercentilesFromProbabilities as Plugin) +from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ + import bounds_for_ecdf, units_of_bounds_for_ecdf from improver.tests.helper_functions_ensemble_calibration import( _add_forecast_reference_time_and_forecast_period) @@ -126,6 +128,62 @@ def set_up_spot_temperature_cube(): return set_up_spot_cube(data, "air_temperature", "1") +class Test__add_bounds_to_thresholds_and_probabilities(IrisTest): + + """Test the _add_bounds_to_thresholds_and_probabilities plugin.""" + + def setUp(self): + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + + def test_basic(self): + """Test that the plugin returns two numpy arrays.""" + cube = self.current_temperature_forecast_cube + threshold_points = cube.coord("probability_above_threshold").points + probabilities_for_cdf = cube.data.reshape(3, 9) + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._add_bounds_to_thresholds_and_probabilities( + threshold_points, probabilities_for_cdf, bounds_pairing) + self.assertIsInstance(result[0], np.ndarray) + self.assertIsInstance(result[1], np.ndarray) + + def test_bounds_of_threshold_points(self): + """ + Test that the plugin returns the expected results for the + threshold_points, where they've been padded with the values from + the bounds_pairing. + """ + cube = self.current_temperature_forecast_cube + threshold_points = cube.coord("probability_above_threshold").points + probabilities_for_cdf = cube.data.reshape(3, 9) + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._add_bounds_to_thresholds_and_probabilities( + threshold_points, probabilities_for_cdf, bounds_pairing) + self.assertArrayAlmostEqual(result[0][0], bounds_pairing[0]) + self.assertArrayAlmostEqual(result[0][-1], bounds_pairing[1]) + + def test_probability_data(self): + """ + Test that the plugin returns the expected results for the + probabilities, where they've been padded with zeros and ones to + represent the extreme ends of the Cumulative Distribution Function. 
+        """
+        cube = self.current_temperature_forecast_cube
+        threshold_points = cube.coord("probability_above_threshold").points
+        probabilities_for_cdf = cube.data.reshape(3, 9)
+        zero_array = np.zeros(probabilities_for_cdf[:, 0].shape)
+        one_array = np.ones(probabilities_for_cdf[:, 0].shape)
+        bounds_pairing = (-40, 50)
+        plugin = Plugin()
+        result = plugin._add_bounds_to_thresholds_and_probabilities(
+            threshold_points, probabilities_for_cdf, bounds_pairing)
+        self.assertArrayAlmostEqual(result[1][:, 0], zero_array)
+        self.assertArrayAlmostEqual(result[1][:, -1], one_array)
+
+
 class Test__probabilities_to_percentiles(IrisTest):
 
     """Test the _create_cube_with_percentiles plugin."""
 
@@ -323,6 +381,64 @@ def test_check_data_spot_forecasts(self):
         self.assertArrayAlmostEqual(result.data, data)
 
 
+class Test__convert_bounds_units(IrisTest):
+
+    """Test the _convert_bounds_units plugin."""
+
+    def setUp(self):
+        self.current_temperature_forecast_cube = (
+            _add_forecast_reference_time_and_forecast_period(
+                set_up_temperature_cube()))
+
+    def test_basic(self):
+        """Test that the result is a numpy array."""
+        cube = self.current_temperature_forecast_cube
+        plugin = Plugin()
+        result = plugin._convert_bounds_units(cube)
+        self.assertIsInstance(result, np.ndarray)
+
+    def test_check_data(self):
+        """
+        Test that the expected results are returned for the bounds_pairing.
+        """
+        cube = self.current_temperature_forecast_cube
+        bounds_pairing = bounds_for_ecdf["air_temperature"]
+        plugin = Plugin()
+        result = plugin._convert_bounds_units(cube)
+        self.assertArrayAlmostEqual(result, bounds_pairing)
+
+    def test_check_unit_conversion(self):
+        """
+        Test that the expected results are returned for the bounds_pairing,
+        if the units of the bounds_pairing need to be converted to match
+        the units of the forecast.
+        """
+        cube = self.current_temperature_forecast_cube
+        cube.coord("probability_above_threshold").convert_units("fahrenheit")
+        fahrenheit_units = cube.coord("probability_above_threshold").units
+        bounds_pairing = bounds_for_ecdf["air_temperature"]
+        bounds_pairing_units = units_of_bounds_for_ecdf["air_temperature"]
+        bounds_pairing_units = Unit(bounds_pairing_units)
+        bounds_pairing = bounds_pairing_units.convert(
+            np.array(bounds_pairing), fahrenheit_units)
+        plugin = Plugin()
+        result = plugin._convert_bounds_units(cube)
+        self.assertArrayAlmostEqual(result, bounds_pairing)
+
+    def test_check_exception_is_raised(self):
+        """
+        Test that a KeyError is raised for an unrecognised phenomenon name.
+        """
+        cube = self.current_temperature_forecast_cube
+        cube.standard_name = None
+        cube.long_name = "Nonsense"
+        bounds_pairing = bounds_for_ecdf["air_temperature"]
+        plugin = Plugin()
+        msg = "The forecast_probabilities name"
+        with self.assertRaisesRegexp(KeyError, msg):
+            plugin._convert_bounds_units(cube)
+
+
 class Test_process(IrisTest):
 
     """Test the _create_cube_with_percentiles plugin."""

From 136e4f214d9287d6173ce b9c16f4f93ef00056a Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Tue, 9 May 2017 10:55:54 +0100
Subject: [PATCH 0093/1367] Single pep8 fix.
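A minimal, self-contained sketch of the bounds-conversion idea added in
PATCH 0092 above, assuming illustrative dictionary values and a hypothetical
convert_bounds helper rather than the repository's definitive constants or
API:

import numpy as np
from cf_units import Unit

# Climatological end points for the empirical CDF, stored once in fixed
# units and converted on demand to the units of the forecast in hand.
bounds_for_ecdf = {"air_temperature": (-40, 50)}
units_of_bounds_for_ecdf = {"air_temperature": "degreesC"}


def convert_bounds(phenomenon, forecast_units):
    """Return the ECDF end points converted into the forecast's units."""
    try:
        bounds = bounds_for_ecdf[phenomenon]
        bounds_units = Unit(units_of_bounds_for_ecdf[phenomenon])
    except KeyError as err:
        msg = ("The phenomenon {} has no ECDF bounds defined.\n"
               "Error: {}").format(phenomenon, err)
        raise KeyError(msg)
    # cf_units performs the numerical conversion, e.g. degreesC -> Kelvin.
    return bounds_units.convert(np.array(bounds), Unit(forecast_units))


print(convert_bounds("air_temperature", "K"))  # [ 233.15  323.15]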
--- .../ensemble_copula_coupling_constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py index 11a1acc6e1..91e9fd2705 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py @@ -9,4 +9,4 @@ # Specify the units for the bounds for each phenomenon units_of_bounds_for_ecdf = {"air_temperature": "degreesC", "wind_speed": "m s^-1", - "air_pressure_at_sea_level": "hPa"} \ No newline at end of file + "air_pressure_at_sea_level": "hPa"} From 4b659f4d8f924d532f96eb1c58692f78b1644d9b Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Fri, 12 May 2017 10:55:42 +0100 Subject: [PATCH 0094/1367] Edits to make the reshaping of the percentile data agnostic of the dimensions of the input data. This means that no explicit if statements are required to deal with spot forecasts, and therefore no explicit knowledge is required of the format of the spot forecast netCDF files. Tests have also been added, in case the input cube doesn't contain a forecast_period or forecast_reference_time coordinate. --- .../ensemble_copula_coupling.py | 47 ++++++------- .../ensemble_copula_coupling_utilities.py | 68 ++++++++++++++----- ...oupling_EnsembleCopulaCouplingUtilities.py | 46 ++++++++++++- 3 files changed, 117 insertions(+), 44 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 886ae3b55e..fe6aed3264 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -141,19 +141,17 @@ def _probabilities_to_percentiles( percentiles, probabilities_for_cdf[index, :], threshold_points) - if forecast_probabilities.coords("locnum"): - t_coord = forecast_probabilities.coord("time") - locnum_coord = forecast_probabilities.coord("locnum") - forecast_at_percentiles = forecast_at_percentiles.reshape( - len(percentiles), len(t_coord.points), - len(locnum_coord.points)) - else: - t_coord = forecast_probabilities.coord("time") - y_coord = forecast_probabilities.coord(axis="y") - x_coord = forecast_probabilities.coord(axis="x") - forecast_at_percentiles = forecast_at_percentiles.reshape( - len(percentiles), len(t_coord.points), len(y_coord.points), - len(x_coord.points)) + # Reshape forecast_at_percentiles, so the percentiles dimension is + # first, and any other dimension coordinates follow. 
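+        # As an illustrative (hypothetical) example: an input cube of shape
+        # (3, 1, 3, 3), with "probability_above_threshold" as its zeroth
+        # dimension, and 10 requested percentiles gives a target shape of
+        # (10, 1, 3, 3); the threshold dimension is dropped and the
+        # percentile dimension is prepended.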
+ shape_to_reshape_to = list(forecast_probabilities.shape) + if forecast_probabilities.coord_dims("probability_above_threshold"): + pat_coord_position = ( + forecast_probabilities.coord_dims("probability_above_threshold")) + shape_to_reshape_to.pop(pat_coord_position[0]) + shape_to_reshape_to = [len(percentiles)] + shape_to_reshape_to + + forecast_at_percentiles = ( + forecast_at_percentiles.reshape(shape_to_reshape_to)) percentile_cube = create_cube_with_percentiles( percentiles, forecast_probabilities, forecast_at_percentiles) @@ -308,19 +306,16 @@ def _mean_and_variance_to_percentiles( result = result.T - if calibrated_forecast_predictor.coords("locnum"): - t_coord = calibrated_forecast_predictor.coord("time") - locnum_coord = calibrated_forecast_predictor.coord("locnum") - result = result.reshape( - len(percentiles), len(t_coord.points), - len(locnum_coord.points)) - else: - t_coord = calibrated_forecast_predictor.coord("time") - y_coord = calibrated_forecast_predictor.coord(axis="y") - x_coord = calibrated_forecast_predictor.coord(axis="x") - result = result.reshape( - len(percentiles), len(t_coord.points), len(y_coord.points), - len(x_coord.points)) + # Reshape forecast_at_percentiles, so the percentiles dimension is + # first, and any other dimension coordinates follow. + shape_to_reshape_to = list(calibrated_forecast_predictor.shape) + if calibrated_forecast_predictor.coord_dims("realization"): + realization_coord_position = ( + calibrated_forecast_predictor.coord_dims("realization")) + shape_to_reshape_to.pop(realization_coord_position[0]) + shape_to_reshape_to = [len(percentiles)] + shape_to_reshape_to + + result = result.reshape(shape_to_reshape_to) percentile_cube = create_cube_with_percentiles( percentiles, calibrated_forecast_predictor, result) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 1e05e20b20..a80de1a840 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -39,6 +39,7 @@ import cf_units as unit import iris +from iris.exceptions import CoordinateNotFoundError def create_percentiles(no_of_percentiles, sampling="quantile"): @@ -114,27 +115,60 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): Coordinate name of the matched coordinate. """ + def _append_to_aux_coords_and_dims(coord_name, aux_coords_and_dims): + """ + Try to append a tuple containing a desired auxiliary coordinate, + if the auxiliary coordinate is present on the template cube. + + Parameters + ---------- + coord_name : String + The name of the desired auxiliary coordinate. 
+ aux_coords_and_dims : List of tuples + List of format: [(aux_coord1, dim_coord_to_be_associated_with), + (aux_coord2, dim_coord_to_be_associated_with)] + For example: [(forecast_period, 1), (forecast_reference_time, 1)] + + """ + try: + coord = template_cube.coord(coord_name) + for coord_tuple in dim_coords_and_dims: + if coord_tuple[0].name() in ["time"]: + time_dim = coord_tuple[1] + break + aux_coords_and_dims.append((coord, time_dim)) + except CoordinateNotFoundError: + pass + percentile_coord = iris.coords.DimCoord( np.float32(percentiles), long_name="percentile", units=unit.Unit("1"), var_name="percentile") - if template_cube.coords("locnum"): - time_coord = template_cube.coord("time") - locnum_coord = template_cube.coord("locnum") - dim_coords_and_dims = [ - (percentile_coord, 0), (time_coord, 1), - (locnum_coord, 2)] - else: - time_coord = template_cube.coord("time") - y_coord = template_cube.coord(axis="y") - x_coord = template_cube.coord(axis="x") - dim_coords_and_dims = [ - (percentile_coord, 0), (time_coord, 1), - (y_coord, 2), (x_coord, 3)] - - frt_coord = template_cube.coord("forecast_reference_time") - fp_coord = template_cube.coord("forecast_period") - aux_coords_and_dims = [(frt_coord, 1), (fp_coord, 1)] + # Aim to create a list of tuples for setting the dim_coords_and_dims + # required for a cube. The "realization" or "probability_above_threshold" + # coordinates on the cube are ignored, with all other coordinates being + # added to the dim_coords_and_dims list. The percentile coordinate tuple + # is prepended to this list. + dim_coords = [] + dims = [] + index = 1 + for coord in template_cube.dim_coords: + if coord.name() in ["realization", "probability_above_threshold"]: + continue + dim_coords.append(coord) + dims.append(index) + index += 1 + + dim_coords_and_dims = [] + for coord, dim in zip(dim_coords, dims): + dim_coords_and_dims.append((coord, dim)) + dim_coords_and_dims = [(percentile_coord, 0)] + dim_coords_and_dims + + aux_coords_and_dims = [] + _append_to_aux_coords_and_dims( + "forecast_reference_time", aux_coords_and_dims) + _append_to_aux_coords_and_dims( + "forecast_period", aux_coords_and_dims) metadata_dict = copy.deepcopy(template_cube.metadata._asdict()) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 445c971248..239c651d9b 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -111,7 +111,11 @@ def test_percentile_points(self): result.coord("percentile").points, percentiles) def test_spot_forecasts_percentile_points(self): - """Test that the plugin returns an Iris.cube.Cube.""" + """ + Test that the plugin returns a Cube with a percentile dimension + coordinate and that the percentile dimension has the expected points + for an input spot forecast. + """ cube = self.current_temperature_spot_forecast_cube cube_data = cube.data + 2 percentiles = [0.1, 0.5, 0.9] @@ -122,6 +126,46 @@ def test_spot_forecasts_percentile_points(self): self.assertArrayAlmostEqual( result.coord("percentile").points, percentiles) + def test_no_forecast_period(self): + """ + Test that the plugin returns an Iris.cube.Cube, when there is no + forecast_period on the input cube. 
+ """ + cube = self.current_temperature_forecast_cube + cube.remove_coord("forecast_period") + cube_data = cube.data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertIsInstance(result, Cube) + + def test_no_forecast_reference_time(self): + """ + Test that the plugin returns an Iris.cube.Cube, when there is no + forecast_reference_time on the input cube. + """ + cube = self.current_temperature_forecast_cube + cube.remove_coord("forecast_reference_time") + cube_data = cube.data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertIsInstance(result, Cube) + + def test_no_forecast_period_or_forecast_reference_time(self): + """ + Test that the plugin returns an Iris.cube.Cube, when there is no + forecast_period and no forecast_reference_time on the input cube. + """ + cube = self.current_temperature_forecast_cube + cube.remove_coord("forecast_period") + cube.remove_coord("forecast_reference_time") + cube_data = cube.data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertIsInstance(result, Cube) + class Test_create_percentiles(IrisTest): From 24ded334220aca581fb702f94d78468d44db9344 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Fri, 12 May 2017 11:02:13 +0100 Subject: [PATCH 0095/1367] Single pep8 fix. --- .../ensemble_copula_coupling/ensemble_copula_coupling.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index fe6aed3264..cd229c5e88 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -146,7 +146,8 @@ def _probabilities_to_percentiles( shape_to_reshape_to = list(forecast_probabilities.shape) if forecast_probabilities.coord_dims("probability_above_threshold"): pat_coord_position = ( - forecast_probabilities.coord_dims("probability_above_threshold")) + forecast_probabilities.coord_dims( + "probability_above_threshold")) shape_to_reshape_to.pop(pat_coord_position[0]) shape_to_reshape_to = [len(percentiles)] + shape_to_reshape_to From db7191f7ea3bd52eab737ec7f522020284a5ce1a Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Fri, 12 May 2017 15:52:12 +0100 Subject: [PATCH 0096/1367] Fixes for suggested changes to constants file to extend docstring and put all values in SI units. --- .../ensemble_copula_coupling_constants.py | 16 +++++++++------- ...pling_GeneratePercentilesFromProbabilities.py | 6 ++++++ 2 files changed, 15 insertions(+), 7 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py index 91e9fd2705..5c110e89c1 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py @@ -1,12 +1,14 @@ """Module to contain constants used for Ensemble Copula Coupling.""" -# Specify the bounds for each phenomenon for creating the empirical -# cumulative distribution function. 
-bounds_for_ecdf = {"air_temperature": (-40, 50), +# For the creation of an empirical cumulative distribution function, +# the following dictionary specifies the end points of the distribution, +# as a first approximation of likely climatological lower and upper bounds. +bounds_for_ecdf = {"air_temperature": (-40+273.15, 50+273.15), "wind_speed": (0, 50), - "air_pressure_at_sea_level": (940, 1070)} + "air_pressure_at_sea_level": (94000, 107000)} -# Specify the units for the bounds for each phenomenon -units_of_bounds_for_ecdf = {"air_temperature": "degreesC", +# Specify the units for the end points of the distribution for each phenomenon. +# SI units are used exclusively. +units_of_bounds_for_ecdf = {"air_temperature": "Kelvin", "wind_speed": "m s^-1", - "air_pressure_at_sea_level": "hPa"} + "air_pressure_at_sea_level": "Pa"} diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 5d93ff6e1f..49364c30b8 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -402,7 +402,13 @@ def test_check_data(self): Test that the expected results are returned for the bounds_pairing. """ cube = self.current_temperature_forecast_cube + fp_units = ( + cube.coord("probability_above_threshold").units) bounds_pairing = bounds_for_ecdf["air_temperature"] + bounds_pairing_units = units_of_bounds_for_ecdf["air_temperature"] + bounds_pairing_units = Unit(bounds_pairing_units) + bounds_pairing = ( + bounds_pairing_units.convert(np.array(bounds_pairing), fp_units)) plugin = Plugin() result = plugin._convert_bounds_units(cube) self.assertArrayAlmostEqual(result, bounds_pairing) From 84ca6488903ca032a31cbe6f5e8278fe5a2eb63f Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Fri, 12 May 2017 17:35:33 +0100 Subject: [PATCH 0097/1367] Refactoring of the create_cube_with_percentiles function, and associated unit tests edits. 
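
In outline, the refactored function builds the percentile cube generically
rather than from hard-coded coordinate lists: the percentile coordinate is
added as dimension 0, and every coordinate is copied across from the template
cube (which must no longer carry a realization or probability_above_threshold
coordinate), with each associated dimension shifted up by one. A minimal
sketch of the approach for review context; the real function also copies
derived coordinates, and the function name below is illustrative only:

import copy

import iris
import numpy as np


def sketch_create_cube_with_percentiles(percentiles, template_cube,
                                        cube_data):
    # The percentile coordinate becomes the new zeroth dimension.
    percentile_coord = iris.coords.DimCoord(
        np.float32(percentiles), long_name="percentile", units="1")
    metadata_dict = copy.deepcopy(template_cube.metadata._asdict())
    result = iris.cube.Cube(cube_data, **metadata_dict)
    result.add_dim_coord(percentile_coord, 0)
    # Every coordinate copied from the template moves up one dimension
    # to make room for the leading percentile coordinate.
    for coord in template_cube.dim_coords:
        dim, = template_cube.coord_dims(coord)
        result.add_dim_coord(coord.copy(), dim + 1)
    for coord in template_cube.aux_coords:
        dims = tuple(dim + 1 for dim in template_cube.coord_dims(coord))
        result.add_aux_coord(coord.copy(), dims)
    return result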
--- .../ensemble_copula_coupling.py | 12 ++- .../ensemble_copula_coupling_utilities.py | 90 ++++++------------- ...oupling_EnsembleCopulaCouplingUtilities.py | 72 ++++++--------- 3 files changed, 62 insertions(+), 112 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index cd229c5e88..6eaba98eb9 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -154,8 +154,12 @@ def _probabilities_to_percentiles( forecast_at_percentiles = ( forecast_at_percentiles.reshape(shape_to_reshape_to)) + for template_cube in forecast_probabilities.slices_over( + "probability_above_threshold"): + template_cube.remove_coord("probability_above_threshold") + break percentile_cube = create_cube_with_percentiles( - percentiles, forecast_probabilities, forecast_at_percentiles) + percentiles, template_cube, forecast_at_percentiles) percentile_cube.cell_methods = {} return percentile_cube @@ -318,8 +322,12 @@ def _mean_and_variance_to_percentiles( result = result.reshape(shape_to_reshape_to) + for template_cube in calibrated_forecast_predictor.slices_over( + "realization"): + template_cube.remove_coord("realization") + break percentile_cube = create_cube_with_percentiles( - percentiles, calibrated_forecast_predictor, result) + percentiles, template_cube, result) percentile_cube.cell_methods = {} return percentile_cube diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index a80de1a840..35723166b7 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -96,85 +96,49 @@ def create_percentiles(no_of_percentiles, sampling="quantile"): def create_cube_with_percentiles(percentiles, template_cube, cube_data): """ Create a cube with a percentile coordinate based on a template cube. + The resulting cube will have an extra percentile coordinate compared with + the input cube. The shape of the cube_data should be the shape of the + desired output cube. Parameters ---------- percentiles : List Ensemble percentiles. template_cube : Iris cube - Cube to copy majority of coordinate definitions from. + Cube to copy all coordinates from. + Metadata is also copied from this cube. cube_data : Numpy array Data to insert into the template cube. - The data is expected to have the shape of - percentiles (0th dimension), time (1st dimension), - y_coord (2nd dimension), x_coord (3rd dimension). + The shape of the cube_data, excluding the dimension associated with + the percentile coordinate, should be the same as the shape of + template_cube. + For example, template_cube shape is (3, 3, 3), whilst the cube_data + is (10, 3, 3, 3), where there are 10 percentiles. Returns ------- - String - Coordinate name of the matched coordinate. + result : Iris.cube.Cube + Cube containing a percentile coordinate as the zeroth dimension + coordinate. """ - def _append_to_aux_coords_and_dims(coord_name, aux_coords_and_dims): - """ - Try to append a tuple containing a desired auxiliary coordinate, - if the auxiliary coordinate is present on the template cube. - - Parameters - ---------- - coord_name : String - The name of the desired auxiliary coordinate. 
- aux_coords_and_dims : List of tuples - List of format: [(aux_coord1, dim_coord_to_be_associated_with), - (aux_coord2, dim_coord_to_be_associated_with)] - For example: [(forecast_period, 1), (forecast_reference_time, 1)] - - """ - try: - coord = template_cube.coord(coord_name) - for coord_tuple in dim_coords_and_dims: - if coord_tuple[0].name() in ["time"]: - time_dim = coord_tuple[1] - break - aux_coords_and_dims.append((coord, time_dim)) - except CoordinateNotFoundError: - pass - percentile_coord = iris.coords.DimCoord( np.float32(percentiles), long_name="percentile", units=unit.Unit("1"), var_name="percentile") - # Aim to create a list of tuples for setting the dim_coords_and_dims - # required for a cube. The "realization" or "probability_above_threshold" - # coordinates on the cube are ignored, with all other coordinates being - # added to the dim_coords_and_dims list. The percentile coordinate tuple - # is prepended to this list. - dim_coords = [] - dims = [] - index = 1 - for coord in template_cube.dim_coords: - if coord.name() in ["realization", "probability_above_threshold"]: - continue - dim_coords.append(coord) - dims.append(index) - index += 1 - - dim_coords_and_dims = [] - for coord, dim in zip(dim_coords, dims): - dim_coords_and_dims.append((coord, dim)) - dim_coords_and_dims = [(percentile_coord, 0)] + dim_coords_and_dims - - aux_coords_and_dims = [] - _append_to_aux_coords_and_dims( - "forecast_reference_time", aux_coords_and_dims) - _append_to_aux_coords_and_dims( - "forecast_period", aux_coords_and_dims) - metadata_dict = copy.deepcopy(template_cube.metadata._asdict()) + result = iris.cube.Cube(cube_data, **metadata_dict) + result.add_dim_coord(percentile_coord, 0) - cube = iris.cube.Cube( - cube_data, dim_coords_and_dims=dim_coords_and_dims, - aux_coords_and_dims=aux_coords_and_dims, **metadata_dict) - cube.attributes = template_cube.attributes - cube.cell_methods = template_cube.cell_methods - return cube + for coord in template_cube.dim_coords: + dim, = template_cube.coord_dims(coord) + result.add_dim_coord(coord.copy(), dim+1) + for coord in template_cube.aux_coords: + dims = template_cube.coord_dims(coord) + dims = tuple([dim+1 for dim in dims]) + result.add_aux_coord(coord.copy(), dims) + for coord in template_cube.derived_coords: + dims = template_cube.coord_dims(coord) + dims = tuple([dim+1 for dim in dims]) + result.add_aux_coord(coord.copy(), dims) + return result diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 239c651d9b..09c4821599 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -42,7 +42,8 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ import create_percentiles, create_cube_with_percentiles from improver.tests.helper_functions_ensemble_calibration import ( - set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period) + set_up_temperature_cube, set_up_spot_temperature_cube, + _add_forecast_reference_time_and_forecast_period) class Test_create_cube_with_percentiles(IrisTest): @@ -51,17 +52,34 @@ class Test_create_cube_with_percentiles(IrisTest): def setUp(self): """Set up temperature cube.""" - self.current_temperature_forecast_cube = ( + current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( 
set_up_temperature_cube())) - self.current_temperature_spot_forecast_cube = ( + + self.cube_data = current_temperature_forecast_cube.data + + current_temperature_spot_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) + set_up_spot_temperature_cube())) + self.cube_spot_data = ( + current_temperature_spot_forecast_cube.data) + + for cube in current_temperature_forecast_cube.slices_over( + "realization"): + cube.remove_coord("realization") + break + self.current_temperature_forecast_cube = cube + + for cube in current_temperature_spot_forecast_cube.slices_over( + "realization"): + cube.remove_coord("realization") + break + self.current_temperature_spot_forecast_cube = cube def test_basic(self): """Test that the plugin returns an Iris.cube.Cube.""" cube = self.current_temperature_forecast_cube - cube_data = cube.data + 2 + cube_data = self.cube_data + 2 percentiles = [0.1, 0.5, 0.9] result = create_cube_with_percentiles( percentiles, cube, cube_data) @@ -103,7 +121,7 @@ def test_percentile_points(self): with a percentile coordinate with the desired points. """ cube = self.current_temperature_forecast_cube - cube_data = cube.data + 2 + cube_data = self.cube_data + 2 percentiles = [0.1, 0.5, 0.9] result = create_cube_with_percentiles(percentiles, cube, cube_data) self.assertIsInstance(result.coord("percentile"), DimCoord) @@ -117,7 +135,7 @@ def test_spot_forecasts_percentile_points(self): for an input spot forecast. """ cube = self.current_temperature_spot_forecast_cube - cube_data = cube.data + 2 + cube_data = self.cube_spot_data + 2 percentiles = [0.1, 0.5, 0.9] result = create_cube_with_percentiles( percentiles, cube, cube_data) @@ -126,46 +144,6 @@ def test_spot_forecasts_percentile_points(self): self.assertArrayAlmostEqual( result.coord("percentile").points, percentiles) - def test_no_forecast_period(self): - """ - Test that the plugin returns an Iris.cube.Cube, when there is no - forecast_period on the input cube. - """ - cube = self.current_temperature_forecast_cube - cube.remove_coord("forecast_period") - cube_data = cube.data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertIsInstance(result, Cube) - - def test_no_forecast_reference_time(self): - """ - Test that the plugin returns an Iris.cube.Cube, when there is no - forecast_reference_time on the input cube. - """ - cube = self.current_temperature_forecast_cube - cube.remove_coord("forecast_reference_time") - cube_data = cube.data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertIsInstance(result, Cube) - - def test_no_forecast_period_or_forecast_reference_time(self): - """ - Test that the plugin returns an Iris.cube.Cube, when there is no - forecast_period and no forecast_reference_time on the input cube. - """ - cube = self.current_temperature_forecast_cube - cube.remove_coord("forecast_period") - cube.remove_coord("forecast_reference_time") - cube_data = cube.data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertIsInstance(result, Cube) - class Test_create_percentiles(IrisTest): From 5c340db34b427b7098a1742401feb6d2f5c236cf Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 15 May 2017 11:08:29 +0100 Subject: [PATCH 0098/1367] Add unit tests and make docstring edits as requested. 
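
The key contract documented here is that cube_data carries one leading
percentile dimension on top of the template cube's shape. A numpy-only
illustration of the expected shapes; the commented-out call is a sketch,
with construction of a full template cube omitted:

import numpy as np

percentiles = [0.1, 0.5, 0.9]
template_shape = (1, 3, 3)  # e.g. (time, y, x) on the template cube
cube_data = np.zeros((len(percentiles),) + template_shape)
assert cube_data.shape == (3, 1, 3, 3)
# create_cube_with_percentiles(percentiles, template_cube, cube_data)
# would return a cube with "percentile" as its zeroth dimension; a
# mismatched leading length raises iris's default "Unequal lengths"
# ValueError, which the new unit tests assert on.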
--- .../ensemble_copula_coupling_utilities.py | 10 ++++- ...oupling_EnsembleCopulaCouplingUtilities.py | 43 +++++++++++++++++++ 2 files changed, 51 insertions(+), 2 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 35723166b7..d4d8865b05 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -103,7 +103,8 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): Parameters ---------- percentiles : List - Ensemble percentiles. + Ensemble percentiles. There should be the same number of percentiles + as the first dimension of cube_data. template_cube : Iris cube Cube to copy all coordinates from. Metadata is also copied from this cube. @@ -119,7 +120,8 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): ------- result : Iris.cube.Cube Cube containing a percentile coordinate as the zeroth dimension - coordinate. + coordinate in addition to the coordinates and metadata from the + template cube. """ percentile_coord = iris.coords.DimCoord( @@ -130,6 +132,10 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): result = iris.cube.Cube(cube_data, **metadata_dict) result.add_dim_coord(percentile_coord, 0) + # For the dimension coordinates, the dimensions are incremented by one, + # as the percentile coordinate has been added as the zeroth coordinate. + # The dimension associated with the auxiliary and derived coordinates + # has also been incremented by one. for coord in template_cube.dim_coords: dim, = template_cube.coord_dims(coord) result.add_dim_coord(coord.copy(), dim+1) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 09c4821599..cdd1290583 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -144,6 +144,49 @@ def test_spot_forecasts_percentile_points(self): self.assertArrayAlmostEqual( result.coord("percentile").points, percentiles) + def test_percentile_length_too_short(self): + """ + Test that the plugin raises the default ValueError, if the number + of percentiles is fewer than the length of the zeroth dimension within + the cube. + """ + cube = self.current_temperature_forecast_cube + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5] + msg = "Unequal lengths" + with self.assertRaisesRegexp(ValueError, msg): + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + + def test_percentile_length_too_long(self): + """ + Test that the plugin raises the default ValueError, if the number + of percentiles exceeds the length of the zeroth dimension within + the cube. + """ + cube = self.current_temperature_forecast_cube + cube = cube[0, :, :, :] + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + msg = "Unequal lengths" + with self.assertRaisesRegexp(ValueError, msg): + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + + def test_metadata_copy(self): + """ + Test that the metadata dictionaries within the input cube, are + also present on the output cube. 
+ """ + cube = self.current_temperature_forecast_cube + cube.attributes = {"source": "ukv"} + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertDictEqual( + cube.metadata._asdict(), result.metadata._asdict()) + class Test_create_percentiles(IrisTest): From 82d711a96d5ddfc4f60f921ea5bfc2c1e9cdd5d7 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 15 May 2017 14:46:12 +0100 Subject: [PATCH 0099/1367] Add unit test to test that the coordinates have been copied. --- ...oupling_EnsembleCopulaCouplingUtilities.py | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index cdd1290583..2fb2ce4283 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -34,8 +34,9 @@ """ import unittest -from iris.coords import DimCoord +from iris.coords import DimCoord, AuxCoord from iris.cube import Cube +from iris.exceptions import CoordinateNotFoundError from iris.tests import IrisTest import numpy as np @@ -187,6 +188,24 @@ def test_metadata_copy(self): self.assertDictEqual( cube.metadata._asdict(), result.metadata._asdict()) + def test_coordinate_copy(self): + """ + Test that the coordinates within the input cube, are + also present on the output cube. + """ + cube = self.current_temperature_forecast_cube + cube.attributes = {"source": "ukv"} + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + for coord in cube.coords(): + if coord not in result.coords(): + msg = ( + "Coordinate: {} not found in cube {}".format( + coord, result)) + raise CoordinateNotFoundError(msg) + class Test_create_percentiles(IrisTest): From 6d1a51d032496bb3e2ee00b829fac462ce3a962f Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 16 May 2017 10:20:06 +0100 Subject: [PATCH 0100/1367] Improvements to the docstrings and addition of unit tests following review comments. --- .../ensemble_copula_coupling.py | 86 +++++++++--- ...ng_GeneratePercentilesFromProbabilities.py | 132 +++++++++++++++--- 2 files changed, 183 insertions(+), 35 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 6eaba98eb9..4dd54af765 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -52,6 +52,16 @@ class GeneratePercentilesFromProbabilities(object): Class for generating percentiles from probabilities. In combination with the Ensemble Reordering plugin, this is Ensemble Copula Coupling. + + This class includes the ability to interpolate between probability + thresholds in order to generate the percentiles, see Figure 1 from + Flowerdew, 2014. + + Scientific Reference: + Flowerdew, J., 2014. + Calibrated ensemble reliability whilst preserving spatial structure. + Tellus Series A, Dynamic Meteorology and Oceanography, 66, 22662. 
+ """ def __init__(self): @@ -63,9 +73,9 @@ def __init__(self): def _add_bounds_to_thresholds_and_probabilities( self, threshold_points, probabilities_for_cdf, bounds_pairing): """ - Padding of the lower and upper bounds for a given phenomenon for the - threshold_points, and padding of probabilities of 0 and 1 to the - forecast probabilities. + Padding of the lower and upper bounds of the distribution for a + given phenomenon for the threshold_points, and padding of + probabilities of 0 and 1 to the forecast probabilities. Parameters ---------- @@ -75,16 +85,17 @@ def _add_bounds_to_thresholds_and_probabilities( Array containing the probabilities used for constructing an empirical cumulative distribution function i.e. probabilities below threshold. + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function. Returns ------- threshold_points : Numpy array - Array of threshold values padded with the lower and upper bound. + Array of threshold values padded with the lower and upper bound + of the distribution. probabilities_for_cdf : Numpy array Array containing the probabilities padded with 0 and 1 at each end. - bounds_pairing : Tuple - Lower and upper bound to be used as the ends of the - empirical cumulative distribution function. """ lower_bound, upper_bound = bounds_pairing @@ -94,6 +105,14 @@ def _add_bounds_to_thresholds_and_probabilities( ones_array = np.ones((probabilities_for_cdf.shape[0], 1)) probabilities_for_cdf = np.concatenate( (zeroes_array, probabilities_for_cdf, ones_array), axis=1) + if np.any(np.diff(threshold_points) < 0): + msg = ("The end points added to the threshold values for " + "constructing the Cumulative Distribution Function (CDF) " + "must result in an ascending order. " + "In this case, the threshold points {} must be outside the " + "allowable range given by the bounds {}".format( + threshold_points, bounds_pairing)) + raise ValueError(msg) return threshold_points, probabilities_for_cdf def _probabilities_to_percentiles( @@ -118,7 +137,8 @@ def _probabilities_to_percentiles( Returns ------- percentile_cube : Iris cube - Cube with probabilities at the required percentiles. + Cube containing values for the required diagnostic e.g. + air_temperature at the required percentiles. """ threshold_points = ( @@ -130,6 +150,14 @@ def _probabilities_to_percentiles( # Invert probabilities probabilities_for_cdf = 1 - prob_slices + if np.any(np.diff(probabilities_for_cdf) < 0): + msg = ("The probability values used to construct the " + "Cumulative Distribution Function (CDF) " + "must be ascending i.e. in order to yield " + "a monotonically increasing CDF." + "The probabilities are {}".format(probabilities_for_cdf)) + raise ValueError(msg) + threshold_points, probabilities_for_cdf = ( self._add_bounds_to_thresholds_and_probabilities( threshold_points, probabilities_for_cdf, bounds_pairing)) @@ -163,10 +191,29 @@ def _probabilities_to_percentiles( percentile_cube.cell_methods = {} return percentile_cube - def _convert_bounds_units(self, forecast_probabilities): + def _get_bounds_of_distribution(self, forecast_probabilities): """ - Convert the units of the bounds_pairing to the units of the - forecast. + Gets the bounds of the distribution and converts the units of the + bounds_pairing to the units of the forecast. + + This method gets the bounds values and units from the imported + dictionaries: bounds_for_ecdf and units_of_bounds_for_ecdf. 
+ The units of the bounds are converted to be the units of the input + cube. + + Parameters + ---------- + forecast_probabilities : Iris Cube + Cube expected to contain a probability_above_threshold + coordinate. + + Returns + ------- + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function, converted to have + the same units as the input cube. + """ fp_units = ( forecast_probabilities.coord("probability_above_threshold").units) @@ -177,8 +224,11 @@ def _convert_bounds_units(self, forecast_probabilities): units_of_bounds_for_ecdf[forecast_probabilities.name()]) except KeyError as err: msg = ("The forecast_probabilities name: {} is not recognised" - "within bounds_for_ecdf / units_of_bounds_for_ecdf: {}.\n" - "Error: {}") + "within bounds_for_ecdf {} or " + "units_of_bounds_for_ecdf: {}. \n" + "Error: {}".format( + forecast_probabilities.name(), bounds_for_ecdf, + units_of_bounds_for_ecdf, err)) raise KeyError(msg) bounds_pairing_units = unit.Unit(bounds_pairing_units) bounds_pairing = bounds_pairing_units.convert( @@ -193,15 +243,18 @@ def process(self, forecast_probabilities, no_of_percentiles=None, 3. Accesses the lower and upper bound pair to find the ends of the empirical cumulative distribution function. 4. Convert the probability_above_threshold coordinate into - values at a set of percentiles. + values at a set of percentiles using linear interpolation, + see Figure 1 from Flowerdew, 2014. Parameters ---------- forecast_probabilities : Iris CubeList or Iris Cube Cube or CubeList expected to contain a probability_above_threshold coordinate. - no_of_percentiles : Integer + no_of_percentiles : Integer or None Number of percentiles + If None, the number of thresholds within the input + forecast_probabilities cube is used as the number of percentiles. sampling : String Type of sampling of the distribution to produce a set of percentiles e.g. quantile or random. @@ -227,7 +280,8 @@ def process(self, forecast_probabilities, no_of_percentiles=None, percentiles = create_percentiles( no_of_percentiles, sampling=sampling) - bounds_pairing = self._convert_bounds_units(forecast_probabilities) + bounds_pairing = ( + self._get_bounds_of_distribution(forecast_probabilities)) forecast_at_percentiles = self._probabilities_to_percentiles( forecast_probabilities, percentiles, bounds_pairing) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 49364c30b8..1f0977532d 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -207,6 +207,111 @@ def test_basic(self): cube, percentiles, bounds_pairing) self.assertIsInstance(result, Cube) + def test_simple_check_data(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles. + + The input cube contains probabilities greater than a given threshold. 
+ """ + expected = np.array([8.15384615, 9.38461538, 11.6]) + expected = expected[:, np.newaxis, np.newaxis, np.newaxis] + + data = np.array([0.95, 0.3, 0.05]) + data = data[:, np.newaxis, np.newaxis, np.newaxis] + + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube( + data, "air_temperature", "1", + forecast_thresholds=[8, 10, 12], y_dimension_length=1, + x_dimension_length=1))) + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, expected) + + def test_probabilities_not_monotonically_increasing(self): + """ + Test that the plugin raises a ValueError when the probabilities + of the Cumulative Distribution Function are not monotonically + increasing. + """ + data = np.array([0.05, 0.7, 0.95]) + data = data[:, np.newaxis, np.newaxis, np.newaxis] + + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube( + data, "air_temperature", "1", + forecast_thresholds=[8, 10, 12], y_dimension_length=1, + x_dimension_length=1))) + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + msg = "The probability values used to construct the" + with self.assertRaisesRegexp(ValueError, msg): + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + + def test_thresholds_not_monotonically_increasing(self): + """ + Test that the plugin raises a ValueError, if threshold points + are added to the cube, which are non monotonically increasing. + """ + data = 1 - np.array([0.05, 0.7, 0.95]) + data = data[:, np.newaxis, np.newaxis, np.newaxis] + msg = "The points array must be strictly monotonic" + with self.assertRaisesRegexp(ValueError, msg): + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube( + data, "air_temperature", "1", + forecast_thresholds=[8, 12, 10], y_dimension_length=1, + x_dimension_length=1))) + + def test_endpoints_of_distribution_exceeded(self): + """ + Test that the plugin raises a ValueError when the constant + end points of the distribution are exceeded by a threshold value + used in the forecast. + """ + data = 1 - np.array([0.05, 0.7, 0.95]) + data = data[:, np.newaxis, np.newaxis, np.newaxis] + + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube( + data, "air_temperature", "1", + forecast_thresholds=[8, 10, 60], y_dimension_length=1, + x_dimension_length=1))) + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + msg = "The end points added to the threshold values for" + with self.assertRaisesRegexp(ValueError, msg): + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + + def test_result_cube_has_no_probability_above_threshold_coordinate(self): + """ + Test that the plugin returns a cube with coordinates that + do not include the probability_above_threshold coordinate. 
+ """ + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + for coord in result.coords(): + self.assertNotEqual(coord.name(), "probability_above_threshold") + def test_check_data(self): """ Test that the plugin returns an Iris.cube.Cube with the expected @@ -381,9 +486,9 @@ def test_check_data_spot_forecasts(self): self.assertArrayAlmostEqual(result.data, data) -class Test__convert_bounds_units(IrisTest): +class Test__get_bounds_of_distribution(IrisTest): - """Test the _convert_bounds_units plugin.""" + """Test the _get_bounds_of_distribution plugin.""" def setUp(self): self.current_temperature_forecast_cube = ( @@ -394,7 +499,7 @@ def test_basic(self): """Test that the result is a numpy array.""" cube = self.current_temperature_forecast_cube plugin = Plugin() - result = plugin._convert_bounds_units(cube) + result = plugin._get_bounds_of_distribution(cube) self.assertIsInstance(result, np.ndarray) def test_check_data(self): @@ -402,15 +507,9 @@ def test_check_data(self): Test that the expected results are returned for the bounds_pairing. """ cube = self.current_temperature_forecast_cube - fp_units = ( - cube.coord("probability_above_threshold").units) - bounds_pairing = bounds_for_ecdf["air_temperature"] - bounds_pairing_units = units_of_bounds_for_ecdf["air_temperature"] - bounds_pairing_units = Unit(bounds_pairing_units) - bounds_pairing = ( - bounds_pairing_units.convert(np.array(bounds_pairing), fp_units)) + bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._convert_bounds_units(cube) + result = plugin._get_bounds_of_distribution(cube) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_unit_conversion(self): @@ -421,14 +520,9 @@ def test_check_unit_conversion(self): """ cube = self.current_temperature_forecast_cube cube.coord("probability_above_threshold").convert_units("fahrenheit") - fahrenheit_units = cube.coord("probability_above_threshold").units - bounds_pairing = bounds_for_ecdf["air_temperature"] - bounds_pairing_units = units_of_bounds_for_ecdf["air_temperature"] - bounds_pairing_units = Unit(bounds_pairing_units) - bounds_pairing = bounds_pairing_units.convert( - np.array(bounds_pairing), fahrenheit_units) + bounds_pairing = (-40, 122) # In fahrenheit plugin = Plugin() - result = plugin._convert_bounds_units(cube) + result = plugin._get_bounds_of_distribution(cube) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_exception_is_raised(self): @@ -442,7 +536,7 @@ def test_check_exception_is_raised(self): plugin = Plugin() msg = "The forecast_probabilities name" with self.assertRaisesRegexp(KeyError, msg): - result = plugin._convert_bounds_units(cube) + result = plugin._get_bounds_of_distribution(cube) class Test_process(IrisTest): From a69d4a3c62ea7c2be96cf99b82d79348500c90eb Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 16 May 2017 11:59:45 +0100 Subject: [PATCH 0101/1367] Remove unit test of Iris functionality and move unit test for bounds to apply to _add_bounds_to_thresholds_and_probabilities method. 
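
The relocated test now exercises the monotonicity guard at the method that
owns it. A small numpy sketch of the check being targeted, using the same
values as the unit test:

import numpy as np

threshold_points = np.array([8, 10, 60])
lower_bound, upper_bound = (-40, 50)
padded = np.concatenate([[lower_bound], threshold_points, [upper_bound]])
# padded is [-40, 8, 10, 60, 50]: the threshold of 60 exceeds the upper
# bound, so the padded points are no longer ascending and
# _add_bounds_to_thresholds_and_probabilities raises a ValueError
# ("The end points added to the threshold values ...").
assert np.any(np.diff(padded) < 0)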
--- ...ng_GeneratePercentilesFromProbabilities.py | 55 +++++-------------- 1 file changed, 15 insertions(+), 40 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 1f0977532d..0f43b9fa01 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -183,6 +183,21 @@ def test_probability_data(self): self.assertArrayAlmostEqual(result[1][:, 0], zero_array) self.assertArrayAlmostEqual(result[1][:, -1], one_array) + def test_endpoints_of_distribution_exceeded(self): + """ + Test that the plugin raises a ValueError when the constant + end points of the distribution are exceeded by a threshold value + used in the forecast. + """ + probabilities_for_cdf = np.array([[0.05, 0.7, 0.95]]) + threshold_points = np.array([8, 10, 60]) + bounds_pairing = (-40, 50) + plugin = Plugin() + msg = "The end points added to the threshold values for" + with self.assertRaisesRegexp(ValueError, msg): + result = plugin._add_bounds_to_thresholds_and_probabilities( + threshold_points, probabilities_for_cdf, bounds_pairing) + class Test__probabilities_to_percentiles(IrisTest): @@ -258,46 +273,6 @@ def test_probabilities_not_monotonically_increasing(self): result = plugin._probabilities_to_percentiles( cube, percentiles, bounds_pairing) - def test_thresholds_not_monotonically_increasing(self): - """ - Test that the plugin raises a ValueError, if threshold points - are added to the cube, which are non monotonically increasing. - """ - data = 1 - np.array([0.05, 0.7, 0.95]) - data = data[:, np.newaxis, np.newaxis, np.newaxis] - msg = "The points array must be strictly monotonic" - with self.assertRaisesRegexp(ValueError, msg): - self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_cube( - data, "air_temperature", "1", - forecast_thresholds=[8, 12, 10], y_dimension_length=1, - x_dimension_length=1))) - - def test_endpoints_of_distribution_exceeded(self): - """ - Test that the plugin raises a ValueError when the constant - end points of the distribution are exceeded by a threshold value - used in the forecast. - """ - data = 1 - np.array([0.05, 0.7, 0.95]) - data = data[:, np.newaxis, np.newaxis, np.newaxis] - - self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_cube( - data, "air_temperature", "1", - forecast_thresholds=[8, 10, 60], y_dimension_length=1, - x_dimension_length=1))) - cube = self.current_temperature_forecast_cube - percentiles = [0.1, 0.5, 0.9] - bounds_pairing = (-40, 50) - plugin = Plugin() - msg = "The end points added to the threshold values for" - with self.assertRaisesRegexp(ValueError, msg): - result = plugin._probabilities_to_percentiles( - cube, percentiles, bounds_pairing) - def test_result_cube_has_no_probability_above_threshold_coordinate(self): """ Test that the plugin returns a cube with coordinates that From 1b5206453e21797116355ba603aed7d2b397a20f Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 16 May 2017 16:15:20 +0100 Subject: [PATCH 0102/1367] Further edits to add additional unit tests for mismatch_between_length_of_raw_members_and_percentiles and improve some other unit tests for the EnsembleReordering plugin. 
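
The recycling behaviour covered by the new tests reduces to a modulo lookup
over the available member numbers, e.g.:

mpoints = [0, 1]  # available raw ensemble member numbers
plen = 3          # number of percentiles required
realization_list = [mpoints[index % len(mpoints)] for index in range(plen)]
assert realization_list == [0, 1, 0]

so with two raw members and three percentiles, the first member is reused
for the third percentile before the extracted members are rebadged with new
realization numbers, which is what the check_data tests assert on.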
--- .../ensemble_copula_coupling.py | 9 +- .../helper_functions_ensemble_calibration.py | 5 +- ...mble_copula_coupling_EnsembleReordering.py | 263 ++++++++---------- 3 files changed, 127 insertions(+), 150 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 4dd54af765..5ce9f7fc67 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -477,12 +477,19 @@ def mismatch_between_length_of_raw_members_and_percentiles( mlen = len(raw_forecast_members.coord("realization").points) if plen == mlen: pass - elif plen > mlen or plen < mlen: + else: raw_forecast_members_extended = iris.cube.CubeList() realization_list = [] mpoints = raw_forecast_members.coord("realization").points + # Loop over the number of percentiles and finding the + # corresponding ensemble member number. The ensemble member + # numbers are recycled e.g. 1, 2, 3, 1, 2, 3, etc. for index in range(plen): realization_list.append(mpoints[index % len(mpoints)]) + # Extract the members required in the realization_list from + # the raw_forecast_members. Edit the member number as appropriate + # and append to a cubelist containing rebadged raw ensemble + # members. for realization, index in zip(realization_list, range(plen)): constr = iris.Constraint(realization=realization) raw_forecast_member = raw_forecast_members.extract(constr) diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 78eb37e564..672ba5c99b 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -43,11 +43,12 @@ concatenate_cubes) -def set_up_cube(data, phenomenon_standard_name, phenomenon_units): +def set_up_cube(data, phenomenon_standard_name, phenomenon_units, + realizations=[0, 1, 2]): """Create a cube containing multiple realizations.""" cube = Cube(data, standard_name=phenomenon_standard_name, units=phenomenon_units) - cube.add_dim_coord(DimCoord([0, 1, 2], 'realization', + cube.add_dim_coord(DimCoord(realizations, 'realization', units='1'), 0) time_origin = "hours since 1970-01-01 00:00:00" calendar = "gregorian" diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 0cb7b805be..e362b025e2 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -33,6 +33,7 @@ `ensemble_copula_coupling.EnsembleReordering` plugin. """ +import itertools import unittest from iris.cube import Cube @@ -70,26 +71,31 @@ def setUp(self): self.percentile_cube = ( _add_forecast_reference_time_and_forecast_period(cube)) - def test_types_length_of_percentiles_equals_length_of_members(self): + def test_realization_for_equal(self): """ Test to check the behaviour whether the number of percentiles equals the number of members. For when the length of the percentiles equals - the length of the members, check that a Cube is returned. + the length of the members, check that the points of the realization + coordinate is as expected. 
""" + data = [0, 1, 2] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube plugin = Plugin() result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) + self.assertArrayAlmostEqual( + data, result.coord("realization").points) - def test_types_length_of_percentiles_greater_than_length_of_members(self): + def test_realization_for_greater_than(self): """ Test to check the behaviour whether the number of percentiles is greater than the number of members. For when the length of the - percentiles is greater than the length of the members, check that a - Cube is returned. + percentiles is greater than the length of the members, check that the + points of the realization coordinate is as expected. """ + data = [0, 1, 2] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] @@ -97,14 +103,17 @@ def test_types_length_of_percentiles_greater_than_length_of_members(self): result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) + self.assertArrayAlmostEqual( + data, result.coord("realization").points) - def test_types_length_of_percentiles_less_than_length_of_members(self): + def test_realization_for_less_than(self): """ Test to check the behaviour whether the number of percentiles is less than the number of members. For when the length of the - percentiles is less than the length of the members, check that a - Cube is returned. + percentiles is less than the length of the members, check that the + points of the realization coordinate is as expected. """ + data = [0, 1] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube post_processed_forecast_percentiles = ( @@ -113,8 +122,10 @@ def test_types_length_of_percentiles_less_than_length_of_members(self): result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) + self.assertArrayAlmostEqual( + data, result.coord("realization").points) - def test_realization_for_equal(self): + def test_realization_for_equal_check_data(self): """ Test to check the behaviour whether the number of percentiles equals the number of members. For when the length of the percentiles equals @@ -122,39 +133,62 @@ def test_realization_for_equal(self): coordinate is as expected. """ data = [0, 1, 2] + data = np.array([[[[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]]], + [[[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]]], + [[[8., 8.625, 9.25], + [9.875, 10.5, 11.125], + [11.75, 12.375, 13.]]]]) + post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube plugin = Plugin() result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) - self.assertArrayAlmostEqual( - data, result.coord("realization").points) + self.assertArrayAlmostEqual(data, result.data) - def test_realization_for_greater_than(self): + def test_realization_for_greater_than_check_data(self): """ Test to check the behaviour whether the number of percentiles is greater than the number of members. 
For when the length of the percentiles is greater than the length of the members, check that the points of the realization coordinate is as expected. """ - data = [0, 1, 2] + data = np.array([[[[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]], + [[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]], + [[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]]]]) post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube + # Slice number of raw forecast members, so that there are fewer + # members than percentiles. raw_forecast_members = raw_forecast_members[:2, :, :, :] plugin = Plugin() result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) - self.assertArrayAlmostEqual( - data, result.coord("realization").points) + self.assertArrayAlmostEqual(data, result.data) - def test_realization_for_less_than(self): + def test_realization_for_less_than_check_data(self): """ Test to check the behaviour whether the number of percentiles is less than the number of members. For when the length of the percentiles is less than the length of the members, check that the points of the realization coordinate is as expected. """ - data = [0, 1] + data = np.array([[[[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]], + [[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]]]]) post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube post_processed_forecast_percentiles = ( @@ -162,8 +196,63 @@ def test_realization_for_less_than(self): plugin = Plugin() result = plugin.mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) - self.assertArrayAlmostEqual( - data, result.coord("realization").points) + self.assertArrayAlmostEqual(data, result.data) + + def test_realization_for_greater_than_check_data_lots_of_members(self): + """ + Test to check the behaviour whether the number of percentiles is + greater than the number of members. For when the length of the + percentiles is greater than the length of the members, check that the + points of the realization coordinate is as expected. 
+ """ + data = np.tile(np.linspace(5, 10, 9), 9).reshape(9, 1, 3, 3) + data[0] -= 1 + data[1] += 1 + data[2] += 3 + cube = set_up_cube( + data, "air_temperature", "degreesC", + realizations=np.arange(0, 9)) + + self.realization_cube = ( + _add_forecast_reference_time_and_forecast_period(cube.copy())) + cube.coord("realization").rename("percentile") + self.percentile_cube = ( + _add_forecast_reference_time_and_forecast_period(cube)) + + expected = np.array([[[[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]], + [[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]], + [[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]], + [[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]], + [[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]], + [[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]], + [[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]], + [[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]], + [[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]]]]) + post_processed_forecast_percentiles = self.percentile_cube + raw_forecast_members = self.realization_cube + raw_forecast_members = raw_forecast_members[:2, :, :, :] + plugin = Plugin() + result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members) + self.assertArrayAlmostEqual(expected, result.data) class Test_rank_ecc(IrisTest): @@ -427,30 +516,6 @@ def test_2d_cube_random_ordering(self): [2], [3]]) - result_data_first = np.array([[1], - [2], - [3]]) - - result_data_second = np.array([[1], - [3], - [2]]) - - result_data_third = np.array([[2], - [1], - [3]]) - - result_data_fourth = np.array([[2], - [3], - [1]]) - - result_data_fifth = np.array([[3], - [1], - [2]]) - - result_data_sixth = np.array([[3], - [2], - [1]]) - cube = self.cube.copy() cube = cube[:, :, 0, 0] raw_cube = cube.copy() @@ -463,47 +528,11 @@ def test_2d_cube_random_ordering(self): random_ordering=True) result.transpose([1, 0]) - err_count = 0 - try: - self.assertArrayAlmostEqual(result.data, result_data_first) - except AssertionError as err1: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_second) - except AssertionError as err2: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_third) - except AssertionError as err3: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_fourth) - except AssertionError as err4: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_fifth) - except AssertionError as err5: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_sixth) - except AssertionError as err6: - err_count += 1 + permutations = list(itertools.permutations(raw_data)) + permutations = [np.array(permutation) for permutation in permutations] - if err_count == 6: - raise ValueError("Exceptions raised as all accepted forms of the " - "calibrated data were not matched." - "1. {}" - "2. {}" - "3. {}" - "4. {}" - "5. {}" - "6. 
{}".format(err1, err2, err3, - err4, err5, err6)) + matches = [all(aresult == result.data) for aresult in permutations] + self.assertIn(True, matches) class Test_process(IrisTest): @@ -546,30 +575,6 @@ def test_2d_cube_random_ordering(self): [2], [3]]) - result_data_first = np.array([[1], - [2], - [3]]) - - result_data_second = np.array([[1], - [3], - [2]]) - - result_data_third = np.array([[2], - [1], - [3]]) - - result_data_fourth = np.array([[2], - [3], - [1]]) - - result_data_fifth = np.array([[3], - [1], - [2]]) - - result_data_sixth = np.array([[3], - [2], - [1]]) - raw_cube = self.raw_cube[:, :, 0, 0] raw_cube.data = raw_data calibrated_cube = self.calibrated_cube[:, :, 0, 0] @@ -580,47 +585,11 @@ def test_2d_cube_random_ordering(self): random_ordering=True) result.transpose([1, 0]) - err_count = 0 - try: - self.assertArrayAlmostEqual(result.data, result_data_first) - except AssertionError as err1: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_second) - except AssertionError as err2: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_third) - except AssertionError as err3: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_fourth) - except AssertionError as err4: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_fifth) - except AssertionError as err5: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_sixth) - except AssertionError as err6: - err_count += 1 + permutations = list(itertools.permutations(raw_data)) + permutations = [np.array(permutation) for permutation in permutations] - if err_count == 6: - raise ValueError("Exceptions raised as all accepted forms of the " - "calibrated data were not matched." - "1. {}" - "2. {}" - "3. {}" - "4. {}" - "5. {}" - "6. {}".format(err1, err2, err3, - err4, err5, err6)) + matches = [all(aresult == result.data) for aresult in permutations] + self.assertIn(True, matches) if __name__ == '__main__': From ae43e6c86bc92c7fa30aa5c6ddf76606774e0aae Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 10:44:53 +0100 Subject: [PATCH 0103/1367] Addition of test_2d_cube_recycling_raw_ensemble_members unit tests and minor edits to related unit tests. --- ...mble_copula_coupling_EnsembleReordering.py | 85 ++++++++++++++++--- 1 file changed, 75 insertions(+), 10 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index e362b025e2..09306aaa06 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -547,10 +547,11 @@ def setUp(self): self.raw_cube = ( add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) - self.calibrated_cube = ( - add_forecast_reference_time_and_forecast_period( + self.post_processed_percentiles = ( + _add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) - self.calibrated_cube.coord("realization").rename("percentile") + self.post_processed_percentiles.coord("realization").rename( + "percentile") def test_basic(self): """ @@ -558,7 +559,7 @@ def test_basic(self): realization coordinate. 
""" plugin = Plugin() - result = plugin.process(self.calibrated_cube, self.raw_cube) + result = plugin.process(self.post_processed_percentiles, self.raw_cube) self.assertIsInstance(result, Cube) self.assertTrue(result.coords("realization")) @@ -571,17 +572,18 @@ def test_2d_cube_random_ordering(self): [2], [1]]) - calibrated_data = np.array([[1], - [2], - [3]]) + post_processed_percentiles_data = np.array([[1], + [2], + [3]]) raw_cube = self.raw_cube[:, :, 0, 0] raw_cube.data = raw_data - calibrated_cube = self.calibrated_cube[:, :, 0, 0] - calibrated_cube.data = calibrated_data + post_processed_percentiles = ( + self.post_processed_percentiles[:, :, 0, 0]) + post_processed_percentiles.data = post_processed_percentiles_data plugin = Plugin() - result = plugin.process(calibrated_cube, raw_cube, + result = plugin.process(post_processed_percentiles, raw_cube, random_ordering=True) result.transpose([1, 0]) @@ -591,6 +593,69 @@ def test_2d_cube_random_ordering(self): matches = [all(aresult == result.data) for aresult in permutations] self.assertIn(True, matches) + def test_2d_cube_recycling_raw_ensemble_members(self): + """ + Test that the plugin returns the correct cube data for a + 2d input cube, if the number of raw ensemble members is fewer + than the number of percentiles required, and therefore, raw + ensemble member recycling is required. + + Case where two raw ensemble members are exactly the same, + after the raw ensemble members have been recycled. + The number of raw ensemble members are recycled in order to match + the number of percentiles. + + After recycling the raw _data will be + raw_data = np.array([[1], + [2], + [1]]) + + If there's a tie, the re-ordering randomly allocates the ordering + for the data from the raw ensemble members, which is why there are + two possible options for the resulting post-processed ensemble members. + + Raw ensemble members + 1, 2 + Post-processed percentiles + 1, 2, 3 + After recycling raw ensemble members + 1, 2, 1 + As the second ensemble member(with a data value of 2), is the highest + value, the highest value from the post-processed percentiles will + be the second ensemble member data value within the post-processed + members. The data values of 1 and 2 from the post-processed + percentiles will then be split between the first and third + post-processed ensemble members. + + """ + raw_data = np.array([[1], + [2]]) + + post_processed_percentiles_data = np.array([[1], + [2], + [3]]) + + expected_first = np.array([[1], + [3], + [2]]) + + expected_second = np.array([[2], + [3], + [1]]) + + raw_cube = self.raw_cube[:2, :, 0, 0] + raw_cube.data = raw_data + post_processed_percentiles = ( + self.post_processed_percentiles[:, :, 0, 0]) + post_processed_percentiles.data = post_processed_percentiles_data + + plugin = Plugin() + result = plugin.process(post_processed_percentiles, raw_cube) + result.transpose([1, 0]) + permutations = [expected_first, expected_second] + matches = [all(aresult == result.data) for aresult in permutations] + self.assertIn(True, matches) + if __name__ == '__main__': unittest.main() From 63c5d987d7b3f62b1b51460b1b02b7e9d43a975a Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 15:05:05 +0100 Subject: [PATCH 0104/1367] Edits to unit tests following using pylint. 
--- ...oupling_EnsembleCopulaCouplingUtilities.py | 6 ++-- ...mble_copula_coupling_EnsembleReordering.py | 28 +++++++------------ ..._GeneratePercentilesFromMeanAndVariance.py | 2 +- ...ng_GeneratePercentilesFromProbabilities.py | 9 +++--- 4 files changed, 18 insertions(+), 27 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 2fb2ce4283..3c048cd52e 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -34,7 +34,7 @@ """ import unittest -from iris.coords import DimCoord, AuxCoord +from iris.coords import DimCoord from iris.cube import Cube from iris.exceptions import CoordinateNotFoundError from iris.tests import IrisTest @@ -156,7 +156,7 @@ def test_percentile_length_too_short(self): percentiles = [0.1, 0.5] msg = "Unequal lengths" with self.assertRaisesRegexp(ValueError, msg): - result = create_cube_with_percentiles( + create_cube_with_percentiles( percentiles, cube, cube_data) def test_percentile_length_too_long(self): @@ -171,7 +171,7 @@ def test_percentile_length_too_long(self): percentiles = [0.1, 0.5, 0.9] msg = "Unequal lengths" with self.assertRaisesRegexp(ValueError, msg): - result = create_cube_with_percentiles( + create_cube_with_percentiles( percentiles, cube, cube_data) def test_metadata_copy(self): diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 09306aaa06..0407eeeb6e 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -455,21 +455,10 @@ def test_3d_cube_tied_values(self): plugin = Plugin() result = plugin.rank_ecc(calibrated_cube, raw_cube) result.transpose([1, 0, 2]) - - err_count = 0 - try: - self.assertArrayAlmostEqual(result.data, result_data_first) - except Exception as err1: - err_count += 1 - - try: - self.assertArrayAlmostEqual(result.data, result_data_second) - except Exception as err2: - err_count += 1 - - if err_count == 2: - raise ValueError("Exceptions raised by both accepted forms of the " - "calibrated data. 
{} {}".format(err1, err2)) + permutations = [result_data_first, result_data_second] + matches = [ + np.array_equal(aresult, result.data) for aresult in permutations] + self.assertIn(True, matches) def test_2d_cube(self): """ @@ -531,7 +520,8 @@ def test_2d_cube_random_ordering(self): permutations = list(itertools.permutations(raw_data)) permutations = [np.array(permutation) for permutation in permutations] - matches = [all(aresult == result.data) for aresult in permutations] + matches = [ + np.array_equal(aresult, result.data) for aresult in permutations] self.assertIn(True, matches) @@ -590,7 +580,8 @@ def test_2d_cube_random_ordering(self): permutations = list(itertools.permutations(raw_data)) permutations = [np.array(permutation) for permutation in permutations] - matches = [all(aresult == result.data) for aresult in permutations] + matches = [ + np.array_equal(aresult, result.data) for aresult in permutations] self.assertIn(True, matches) def test_2d_cube_recycling_raw_ensemble_members(self): @@ -653,7 +644,8 @@ def test_2d_cube_recycling_raw_ensemble_members(self): result = plugin.process(post_processed_percentiles, raw_cube) result.transpose([1, 0]) permutations = [expected_first, expected_second] - matches = [all(aresult == result.data) for aresult in permutations] + matches = [ + np.array_equal(aresult, result.data) for aresult in permutations] self.assertIn(True, matches) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index 5141cfa58b..100de55259 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -237,7 +237,7 @@ def test_negative_percentiles(self): plugin = Plugin() msg = "NaNs are present within the result for the" with self.assertRaisesRegexp(ValueError, msg): - result = plugin._mean_and_variance_to_percentiles( + plugin._mean_and_variance_to_percentiles( current_forecast_predictor, current_forecast_variance, percentiles) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 0f43b9fa01..8f63748fe3 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -45,7 +45,7 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( GeneratePercentilesFromProbabilities as Plugin) from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf, units_of_bounds_for_ecdf + import bounds_for_ecdf from improver.tests.helper_functions_ensemble_calibration import( _add_forecast_reference_time_and_forecast_period) @@ -195,7 +195,7 @@ def test_endpoints_of_distribution_exceeded(self): plugin = Plugin() msg = "The end points added to the threshold values for" with self.assertRaisesRegexp(ValueError, msg): - result = plugin._add_bounds_to_thresholds_and_probabilities( + plugin._add_bounds_to_thresholds_and_probabilities( threshold_points, probabilities_for_cdf, bounds_pairing) @@ -270,7 +270,7 @@ def test_probabilities_not_monotonically_increasing(self): plugin = Plugin() msg = "The probability values used to construct the" with self.assertRaisesRegexp(ValueError, 
msg): - result = plugin._probabilities_to_percentiles( + plugin._probabilities_to_percentiles( cube, percentiles, bounds_pairing) def test_result_cube_has_no_probability_above_threshold_coordinate(self): @@ -507,11 +507,10 @@ def test_check_exception_is_raised(self): cube = self.current_temperature_forecast_cube cube.standard_name = None cube.long_name = "Nonsense" - bounds_pairing = bounds_for_ecdf["air_temperature"] plugin = Plugin() msg = "The forecast_probabilities name" with self.assertRaisesRegexp(KeyError, msg): - result = plugin._get_bounds_of_distribution(cube) + plugin._get_bounds_of_distribution(cube) class Test_process(IrisTest): From 8a549d1c296dcb5644ddaa34df428be413f56d4a Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 15:12:39 +0100 Subject: [PATCH 0105/1367] Pylint edit to ensemble_copula_coupling_utilities.py. --- .../ensemble_copula_coupling_utilities.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index d4d8865b05..78a200d3ed 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -39,7 +39,6 @@ import cf_units as unit import iris -from iris.exceptions import CoordinateNotFoundError def create_percentiles(no_of_percentiles, sampling="quantile"): From 1e3efc064c7ee641736247a8a6d654a6d3c8f1d7 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 15:30:06 +0100 Subject: [PATCH 0106/1367] Add licence information for ensemble_copula_coupling_constants.py. --- .../ensemble_copula_coupling_constants.py | 30 +++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py index 5c110e89c1..cc84c58958 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py @@ -1,3 +1,33 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. """Module to contain constants used for Ensemble Copula Coupling.""" # For the creation of an empirical cumulative distribution function, From 6480da2728f8431698e43517920a0aa0e5061317 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 16:31:49 +0100 Subject: [PATCH 0107/1367] Moved add_bounds_to_thresholds_and_probabilities and get_bounds_of_distribution methods to be functions within the Ensemble Copula Coupling utilities file. --- .../ensemble_copula_coupling.py | 115 ++++-------------- .../ensemble_copula_coupling_utilities.py | 91 ++++++++++++++ 2 files changed, 114 insertions(+), 92 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 5ce9f7fc67..14e8492bb8 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -44,7 +44,27 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ import bounds_for_ecdf, units_of_bounds_for_ecdf from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ - import create_cube_with_percentiles, create_percentiles + import (add_bounds_to_thresholds_and_probabilities, + create_cube_with_percentiles, create_percentiles, + get_bounds_of_distribution) + + +class ResamplePercentiles(object): + """ + Class for resampling percentiles from an existing set of percentiles. + In combination with the Ensemble Reordering plugin, this is Ensemble + Copula Coupling. + + This class includes the ability to interpolate from an input set of + percentiles to a different output set of percentiles. + + """ + + def __init__(self): + """ + Initialise the class. + """ + pass class GeneratePercentilesFromProbabilities(object): @@ -70,51 +90,6 @@ def __init__(self): """ pass - def _add_bounds_to_thresholds_and_probabilities( - self, threshold_points, probabilities_for_cdf, bounds_pairing): - """ - Padding of the lower and upper bounds of the distribution for a - given phenomenon for the threshold_points, and padding of - probabilities of 0 and 1 to the forecast probabilities. - - Parameters - ---------- - threshold_points : Numpy array - Array of threshold values used to calculate the probabilities. - probabilities_for_cdf : Numpy array - Array containing the probabilities used for constructing an - empirical cumulative distribution function i.e. probabilities - below threshold. - bounds_pairing : Tuple - Lower and upper bound to be used as the ends of the - empirical cumulative distribution function. - - Returns - ------- - threshold_points : Numpy array - Array of threshold values padded with the lower and upper bound - of the distribution. - probabilities_for_cdf : Numpy array - Array containing the probabilities padded with 0 and 1 at each end. 
- - """ - lower_bound, upper_bound = bounds_pairing - threshold_points = np.insert(threshold_points, 0, lower_bound) - threshold_points = np.append(threshold_points, upper_bound) - zeroes_array = np.zeros((probabilities_for_cdf.shape[0], 1)) - ones_array = np.ones((probabilities_for_cdf.shape[0], 1)) - probabilities_for_cdf = np.concatenate( - (zeroes_array, probabilities_for_cdf, ones_array), axis=1) - if np.any(np.diff(threshold_points) < 0): - msg = ("The end points added to the threshold values for " - "constructing the Cumulative Distribution Function (CDF) " - "must result in an ascending order. " - "In this case, the threshold points {} must be outside the " - "allowable range given by the bounds {}".format( - threshold_points, bounds_pairing)) - raise ValueError(msg) - return threshold_points, probabilities_for_cdf - def _probabilities_to_percentiles( self, forecast_probabilities, percentiles, bounds_pairing): """ @@ -159,7 +134,7 @@ def _probabilities_to_percentiles( raise ValueError(msg) threshold_points, probabilities_for_cdf = ( - self._add_bounds_to_thresholds_and_probabilities( + _add_bounds_to_thresholds_and_probabilities( threshold_points, probabilities_for_cdf, bounds_pairing)) forecast_at_percentiles = ( @@ -191,50 +166,6 @@ def _probabilities_to_percentiles( percentile_cube.cell_methods = {} return percentile_cube - def _get_bounds_of_distribution(self, forecast_probabilities): - """ - Gets the bounds of the distribution and converts the units of the - bounds_pairing to the units of the forecast. - - This method gets the bounds values and units from the imported - dictionaries: bounds_for_ecdf and units_of_bounds_for_ecdf. - The units of the bounds are converted to be the units of the input - cube. - - Parameters - ---------- - forecast_probabilities : Iris Cube - Cube expected to contain a probability_above_threshold - coordinate. - - Returns - ------- - bounds_pairing : Tuple - Lower and upper bound to be used as the ends of the - empirical cumulative distribution function, converted to have - the same units as the input cube. - - """ - fp_units = ( - forecast_probabilities.coord("probability_above_threshold").units) - # Extract bounds from dictionary of constants. - try: - bounds_pairing = bounds_for_ecdf[forecast_probabilities.name()] - bounds_pairing_units = ( - units_of_bounds_for_ecdf[forecast_probabilities.name()]) - except KeyError as err: - msg = ("The forecast_probabilities name: {} is not recognised" - "within bounds_for_ecdf {} or " - "units_of_bounds_for_ecdf: {}. 
\n" - "Error: {}".format( - forecast_probabilities.name(), bounds_for_ecdf, - units_of_bounds_for_ecdf, err)) - raise KeyError(msg) - bounds_pairing_units = unit.Unit(bounds_pairing_units) - bounds_pairing = bounds_pairing_units.convert( - np.array(bounds_pairing), fp_units) - return bounds_pairing - def process(self, forecast_probabilities, no_of_percentiles=None, sampling="quantile"): """ @@ -281,7 +212,7 @@ def process(self, forecast_probabilities, no_of_percentiles=None, no_of_percentiles, sampling=sampling) bounds_pairing = ( - self._get_bounds_of_distribution(forecast_probabilities)) + _get_bounds_of_distribution(forecast_probabilities)) forecast_at_percentiles = self._probabilities_to_percentiles( forecast_probabilities, percentiles, bounds_pairing) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 78a200d3ed..0bf78e9936 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -41,6 +41,52 @@ import iris +def add_bounds_to_thresholds_and_probabilities( + threshold_points, probabilities_for_cdf, bounds_pairing): + """ + Padding of the lower and upper bounds of the distribution for a + given phenomenon for the threshold_points, and padding of + probabilities of 0 and 1 to the forecast probabilities. + + Parameters + ---------- + threshold_points : Numpy array + Array of threshold values used to calculate the probabilities. + probabilities_for_cdf : Numpy array + Array containing the probabilities used for constructing an + empirical cumulative distribution function i.e. probabilities + below threshold. + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function. + + Returns + ------- + threshold_points : Numpy array + Array of threshold values padded with the lower and upper bound + of the distribution. + probabilities_for_cdf : Numpy array + Array containing the probabilities padded with 0 and 1 at each end. + + """ + lower_bound, upper_bound = bounds_pairing + threshold_points = np.insert(threshold_points, 0, lower_bound) + threshold_points = np.append(threshold_points, upper_bound) + zeroes_array = np.zeros((probabilities_for_cdf.shape[0], 1)) + ones_array = np.ones((probabilities_for_cdf.shape[0], 1)) + probabilities_for_cdf = np.concatenate( + (zeroes_array, probabilities_for_cdf, ones_array), axis=1) + if np.any(np.diff(threshold_points) < 0): + msg = ("The end points added to the threshold values for " + "constructing the Cumulative Distribution Function (CDF) " + "must result in an ascending order. " + "In this case, the threshold points {} must be outside the " + "allowable range given by the bounds {}".format( + threshold_points, bounds_pairing)) + raise ValueError(msg) + return threshold_points, probabilities_for_cdf + + def create_percentiles(no_of_percentiles, sampling="quantile"): """ Function to create percentiles. @@ -147,3 +193,48 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): dims = tuple([dim+1 for dim in dims]) result.add_aux_coord(coord.copy(), dims) return result + + +def get_bounds_of_distribution(forecast_probabilities): + """ + Gets the bounds of the distribution and converts the units of the + bounds_pairing to the units of the forecast. 
+ + This method gets the bounds values and units from the imported + dictionaries: bounds_for_ecdf and units_of_bounds_for_ecdf. + The units of the bounds are converted to be the units of the input + cube. + + Parameters + ---------- + forecast_probabilities : Iris Cube + Cube expected to contain a probability_above_threshold + coordinate. + + Returns + ------- + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function, converted to have + the same units as the input cube. + + """ + fp_units = ( + forecast_probabilities.coord("probability_above_threshold").units) + # Extract bounds from dictionary of constants. + try: + bounds_pairing = bounds_for_ecdf[forecast_probabilities.name()] + bounds_pairing_units = ( + units_of_bounds_for_ecdf[forecast_probabilities.name()]) + except KeyError as err: + msg = ("The forecast_probabilities name: {} is not recognised" + "within bounds_for_ecdf {} or " + "units_of_bounds_for_ecdf: {}. \n" + "Error: {}".format( + forecast_probabilities.name(), bounds_for_ecdf, + units_of_bounds_for_ecdf, err)) + raise KeyError(msg) + bounds_pairing_units = unit.Unit(bounds_pairing_units) + bounds_pairing = bounds_pairing_units.convert( + np.array(bounds_pairing), fp_units) + return bounds_pairing From fc79a441a74184ed7709b90d8f53709070b33dcb Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 17:33:19 +0100 Subject: [PATCH 0108/1367] Moved add_bounds_to_thresholds_and_probabilities back from utilities, as it seems like it will be simpler to create a separate method for adding bounds for percentiles. Other edits made to get_bounds_of_distribution to make it more generic, as this function is more suited to being a utility. --- .../ensemble_copula_coupling.py | 45 ++++++++++++- .../ensemble_copula_coupling_utilities.py | 67 +++---------------- 2 files changed, 55 insertions(+), 57 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 14e8492bb8..759bf7c964 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -90,6 +90,48 @@ def __init__(self): """ pass + def _add_bounds_to_thresholds_and_probabilities( + self, threshold_points, probabilities_for_cdf, bounds_pairing): + """ + Padding of the lower and upper bounds of the distribution for a + given phenomenon for the threshold_points, and padding of + probabilities of 0 and 1 to the forecast probabilities. + Parameters + ---------- + threshold_points : Numpy array + Array of threshold values used to calculate the probabilities. + probabilities_for_cdf : Numpy array + Array containing the probabilities used for constructing an + empirical cumulative distribution function i.e. probabilities + below threshold. + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function. + Returns + ------- + threshold_points : Numpy array + Array of threshold values padded with the lower and upper bound + of the distribution. + probabilities_for_cdf : Numpy array + Array containing the probabilities padded with 0 and 1 at each end. 
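+
+ As a minimal illustration of the padding (the values here are
+ illustrative only, assuming a bounds_pairing of (-40, 50)):
+ threshold_points of [8, 10, 12] become [-40, 8, 10, 12, 50], and
+ probabilities_for_cdf of [[0.2, 0.5, 0.8]] becomes
+ [[0, 0.2, 0.5, 0.8, 1]].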
+ """ + lower_bound, upper_bound = bounds_pairing + threshold_points = np.insert(threshold_points, 0, lower_bound) + threshold_points = np.append(threshold_points, upper_bound) + zeroes_array = np.zeros((probabilities_for_cdf.shape[0], 1)) + ones_array = np.ones((probabilities_for_cdf.shape[0], 1)) + probabilities_for_cdf = np.concatenate( + (zeroes_array, probabilities_for_cdf, ones_array), axis=1) + if np.any(np.diff(threshold_points) < 0): + msg = ("The end points added to the threshold values for " + "constructing the Cumulative Distribution Function (CDF) " + "must result in an ascending order. " + "In this case, the threshold points {} must be outside the " + "allowable range given by the bounds {}".format( + threshold_points, bounds_pairing)) + raise ValueError(msg) + return threshold_points, probabilities_for_cdf + def _probabilities_to_percentiles( self, forecast_probabilities, percentiles, bounds_pairing): """ @@ -212,7 +254,8 @@ def process(self, forecast_probabilities, no_of_percentiles=None, no_of_percentiles, sampling=sampling) bounds_pairing = ( - _get_bounds_of_distribution(forecast_probabilities)) + _get_bounds_of_distribution( + forecast_probabilities, "probability_above_threshold")) forecast_at_percentiles = self._probabilities_to_percentiles( forecast_probabilities, percentiles, bounds_pairing) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 0bf78e9936..f1080a63e1 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -41,52 +41,6 @@ import iris -def add_bounds_to_thresholds_and_probabilities( - threshold_points, probabilities_for_cdf, bounds_pairing): - """ - Padding of the lower and upper bounds of the distribution for a - given phenomenon for the threshold_points, and padding of - probabilities of 0 and 1 to the forecast probabilities. - - Parameters - ---------- - threshold_points : Numpy array - Array of threshold values used to calculate the probabilities. - probabilities_for_cdf : Numpy array - Array containing the probabilities used for constructing an - empirical cumulative distribution function i.e. probabilities - below threshold. - bounds_pairing : Tuple - Lower and upper bound to be used as the ends of the - empirical cumulative distribution function. - - Returns - ------- - threshold_points : Numpy array - Array of threshold values padded with the lower and upper bound - of the distribution. - probabilities_for_cdf : Numpy array - Array containing the probabilities padded with 0 and 1 at each end. - - """ - lower_bound, upper_bound = bounds_pairing - threshold_points = np.insert(threshold_points, 0, lower_bound) - threshold_points = np.append(threshold_points, upper_bound) - zeroes_array = np.zeros((probabilities_for_cdf.shape[0], 1)) - ones_array = np.ones((probabilities_for_cdf.shape[0], 1)) - probabilities_for_cdf = np.concatenate( - (zeroes_array, probabilities_for_cdf, ones_array), axis=1) - if np.any(np.diff(threshold_points) < 0): - msg = ("The end points added to the threshold values for " - "constructing the Cumulative Distribution Function (CDF) " - "must result in an ascending order. 
" - "In this case, the threshold points {} must be outside the " - "allowable range given by the bounds {}".format( - threshold_points, bounds_pairing)) - raise ValueError(msg) - return threshold_points, probabilities_for_cdf - - def create_percentiles(no_of_percentiles, sampling="quantile"): """ Function to create percentiles. @@ -195,7 +149,7 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): return result -def get_bounds_of_distribution(forecast_probabilities): +def get_bounds_of_distribution(forecast_cube, coord_for_units): """ Gets the bounds of the distribution and converts the units of the bounds_pairing to the units of the forecast. @@ -207,8 +161,9 @@ def get_bounds_of_distribution(forecast_probabilities): Parameters ---------- - forecast_probabilities : Iris Cube - Cube expected to contain a probability_above_threshold + forecast_cube : Iris Cube + Input cube containing the coordinate from which the units + of the bounds_pairing should be converted to. coordinate. Returns @@ -219,22 +174,22 @@ def get_bounds_of_distribution(forecast_probabilities): the same units as the input cube. """ - fp_units = ( - forecast_probabilities.coord("probability_above_threshold").units) + cube_units = ( + forecast_cube.coord(coord_for_units).units) # Extract bounds from dictionary of constants. try: - bounds_pairing = bounds_for_ecdf[forecast_probabilities.name()] + bounds_pairing = bounds_for_ecdf[forecast_cube.name()] bounds_pairing_units = ( - units_of_bounds_for_ecdf[forecast_probabilities.name()]) + units_of_bounds_for_ecdf[forecast_cube.name()]) except KeyError as err: - msg = ("The forecast_probabilities name: {} is not recognised" + msg = ("The forecast_cube name: {} is not recognised" "within bounds_for_ecdf {} or " "units_of_bounds_for_ecdf: {}. \n" "Error: {}".format( - forecast_probabilities.name(), bounds_for_ecdf, + forecast_cube.name(), bounds_for_ecdf, units_of_bounds_for_ecdf, err)) raise KeyError(msg) bounds_pairing_units = unit.Unit(bounds_pairing_units) bounds_pairing = bounds_pairing_units.convert( - np.array(bounds_pairing), fp_units) + np.array(bounds_pairing), cube_units) return bounds_pairing From 47aa0bd4d83b6c29943646cf9afe1f03cb9fac50 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 18:27:45 +0100 Subject: [PATCH 0109/1367] First attempt at a plugin to use linear interpolation to generate a second set of percentiles from an original set of percentiles. No unit tests yet. Some refactoring to factor out shared functionality may be required. --- .../ensemble_copula_coupling.py | 155 +++++++++++++++++- 1 file changed, 154 insertions(+), 1 deletion(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 759bf7c964..d1d168408c 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -66,6 +66,159 @@ def __init__(self): """ pass + def _add_bounds_to_percentiles_and_forecast_values( + self, percentiles, forecast_values, bounds_pairing): + """ + Padding of the lower and upper bounds of the percentiles for a + given phenomenon, and padding of forecast values using the + constant lower and upper bounds. + + Parameters + ---------- + percentiles : Numpy array + Array of percentiles from a Cumulative Distribution Function. + forecast_values : Numpy array + Array containing the underlying forecast values at each percentile. 
+ bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function. + Returns + ------- + percentiles : Numpy array + Array of percentiles from a Cumulative Distribution Function. + forecast_values : Numpy array + Array containing the underlying forecast values at each percentile. + """ + lower_bound, upper_bound = bounds_pairing + percentiles = np.insert(percentiles, 0, 0) + percentiles = np.append(percentiles, 1) + lower_array = np.full((forecast_values.shape[0], 1), lower_bound) + upper_array = np.full((forecast_values.shape[0], 1), upper_bound) + forecast_values = np.concatenate( + (lower_array, forecast_values, upper_array), axis=1) + if np.any(np.diff(forecast_values) < 0): + msg = ("The end points added to the forecast values " + "representing for each percentile must result in " + "an ascending order. " + "In this case, the forecast values {} must be outside the " + "allowable range given by the bounds {}".format( + forecast_values, bounds_pairing)) + raise ValueError(msg) + return percentiles, forecast_values + + def _sample_percentiles( + self, forecast_at_percentiles, desired_percentiles): + """ + Interpolation of forecast for a set of percentiles from an initial + set of percentiles to a new set of percentiles. This is constructed + by linearly interpolating between the original set of percentiles + to a new set of percentiles. + + Parameters + ---------- + forecast_at_percentiles : Iris CubeList or Iris Cube + Cube or CubeList expected to contain a percentile coordinate. + desired_percentiles : Numpy array + Array of the desired percentiles. + + Returns + ------- + percentile_cube : Iris cube + Cube containing values for the required diagnostic e.g. + air_temperature at the required percentiles. + + """ + original_percentiles = ( + forecast_at_percentiles.coord("percentiles").points) + + forecast_at_reshaped_percentiles = convert_cube_data_to_2d( + forecast_at_percentiles, coord="percentiles") + + original_percentiles, forecast_at_reshaped_percentiles = ( + _add_bounds_to_percentiles_and_forecast_values( + original_percentiles, forecast_at_reshaped_percentiles, + bounds_pairing)) + + forecast_at_interpolated_percentiles = ( + np.empty( + (forecast_at_reshaped_percentiles.shape[0], + len(desired_percentiles)))) + for index in range(forecast_at_reshaped_percentiles.shape[0]): + forecast_at_interpolated_percentiles[index, :] = np.interp( + desired_percentiles, + original_percentiles, + forecast_at_reshaped_percentiles[index, :]) + + # Reshape the interpolated data, so the percentiles dimension is + # first, and any other dimension coordinates follow. + shape_to_reshape_to = list(forecast_at_percentiles.shape) + if forecast_at_percentiles.coord_dims("percentiles"): + pat_coord_position = ( + forecast_at_percentiles.coord_dims("percentiles")) + shape_to_reshape_to.pop(pat_coord_position[0]) + shape_to_reshape_to = [len(desired_percentiles)] + shape_to_reshape_to + + forecast_at_interpolated_percentiles = ( + forecast_at_interpolated_percentiles.reshape(shape_to_reshape_to)) + + for template_cube in forecast_at_percentiles.slices_over( + "percentiles"): + template_cube.remove_coord("percentiles") + break + percentile_cube = create_cube_with_percentiles( + desired_percentiles, template_cube, + forecast_at_interpolated_percentiles) + percentile_cube.cell_methods = {} + return percentile_cube + + def process(self, forecast_at_percentiles, no_of_percentiles=None, + sampling="quantile"): + """ + 1. Concatenates cubes with a percentile coordinate. + 2. 
Creates a list of percentiles. + 3. Accesses the lower and upper bound pair of the forecast values, + in order to specify lower and upper bounds for the percentiles. + 4. Interpolate the percentile coordinate into an alternative + set of percentiles using linear interpolation. + + Parameters + ---------- + forecast_at_percentiles : Iris CubeList or Iris Cube + Cube or CubeList expected to contain a percentile coordinate. + no_of_percentiles : Integer or None + Number of percentiles + If None, the number of percentiles within the input + forecast_percentiles cube is used as the number of percentiles. + sampling : String + Type of sampling of the distribution to produce a set of + percentiles e.g. quantile or random. + Accepted options for sampling are: + Quantile: A regular set of equally-spaced percentiles aimed + at dividing a Cumulative Distribution Function into + blocks of equal probability. + Random: A random set of ordered percentiles. + + Returns + ------- + forecast_at_percentiles : Iris cube + Cube with forecast values at the desired set of percentiles. + + """ + forecast_at_percentiles = concatenate_cubes(forecast_at_percentiles) + + if no_of_percentiles is None: + no_of_percentiles = ( + len(forecast_at_percentiles.coord("percentiles").points)) + + percentiles = create_percentiles( + no_of_percentiles, sampling=sampling) + + bounds_pairing = ( + get_bounds_of_distribution(forecast_at_percentiles, "percentiles")) + + forecast_at_percentiles = self._sample_percentiles( + forecast_at_percentiles, percentiles, bounds_pairing) + return forecast_at_percentiles + class GeneratePercentilesFromProbabilities(object): """ @@ -254,7 +407,7 @@ def process(self, forecast_probabilities, no_of_percentiles=None, no_of_percentiles, sampling=sampling) bounds_pairing = ( - _get_bounds_of_distribution( + get_bounds_of_distribution( forecast_probabilities, "probability_above_threshold")) forecast_at_percentiles = self._probabilities_to_percentiles( From fb04ab7cdc46d910dd1d5fa233886665273b93e5 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 17 May 2017 18:31:07 +0100 Subject: [PATCH 0110/1367] Move unit tests for get_bounds_of_distribution. --- ...oupling_EnsembleCopulaCouplingUtilities.py | 48 +++++++++++++++++ ...ng_GeneratePercentilesFromProbabilities.py | 52 ------------------- 2 files changed, 48 insertions(+), 52 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 3c048cd52e..bf80252f54 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -252,5 +252,53 @@ def test_unknown_sampling_option(self): create_percentiles(no_of_percentiles, sampling="unknown") +class Test_get_bounds_of_distribution(IrisTest): + + """Test the get_bounds_of_distribution plugin.""" + + def setUp(self): + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + + def test_basic(self): + """Test that the result is a numpy array.""" + cube = self.current_temperature_forecast_cube + result = get_bounds_of_distribution(cube) + self.assertIsInstance(result, np.ndarray) + + def test_check_data(self): + """ + Test that the expected results are returned for the bounds_pairing. 
+ """ + cube = self.current_temperature_forecast_cube + bounds_pairing = (-40, 50) + result = get_bounds_of_distribution(cube) + self.assertArrayAlmostEqual(result, bounds_pairing) + + def test_check_unit_conversion(self): + """ + Test that the expected results are returned for the bounds_pairing, + if the units of the bounds_pairings need to be converted to match + the units of the forecast. + """ + cube = self.current_temperature_forecast_cube + cube.coord("probability_above_threshold").convert_units("fahrenheit") + bounds_pairing = (-40, 122) # In fahrenheit + result = get_bounds_of_distribution(cube) + self.assertArrayAlmostEqual(result, bounds_pairing) + + def test_check_exception_is_raised(self): + """ + Test that the expected results are returned for the bounds_pairing. + """ + cube = self.current_temperature_forecast_cube + cube.standard_name = None + cube.long_name = "Nonsense" + msg = "The forecast_probabilities name" + with self.assertRaisesRegexp(KeyError, msg): + get_bounds_of_distribution(cube) + + if __name__ == '__main__': unittest.main() diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 8f63748fe3..449f8781d6 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -461,58 +461,6 @@ def test_check_data_spot_forecasts(self): self.assertArrayAlmostEqual(result.data, data) -class Test__get_bounds_of_distribution(IrisTest): - - """Test the _get_bounds_of_distribution plugin.""" - - def setUp(self): - self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) - - def test_basic(self): - """Test that the result is a numpy array.""" - cube = self.current_temperature_forecast_cube - plugin = Plugin() - result = plugin._get_bounds_of_distribution(cube) - self.assertIsInstance(result, np.ndarray) - - def test_check_data(self): - """ - Test that the expected results are returned for the bounds_pairing. - """ - cube = self.current_temperature_forecast_cube - bounds_pairing = (-40, 50) - plugin = Plugin() - result = plugin._get_bounds_of_distribution(cube) - self.assertArrayAlmostEqual(result, bounds_pairing) - - def test_check_unit_conversion(self): - """ - Test that the expected results are returned for the bounds_pairing, - if the units of the bounds_pairings need to be converted to match - the units of the forecast. - """ - cube = self.current_temperature_forecast_cube - cube.coord("probability_above_threshold").convert_units("fahrenheit") - bounds_pairing = (-40, 122) # In fahrenheit - plugin = Plugin() - result = plugin._get_bounds_of_distribution(cube) - self.assertArrayAlmostEqual(result, bounds_pairing) - - def test_check_exception_is_raised(self): - """ - Test that the expected results are returned for the bounds_pairing. 
- """ - cube = self.current_temperature_forecast_cube - cube.standard_name = None - cube.long_name = "Nonsense" - plugin = Plugin() - msg = "The forecast_probabilities name" - with self.assertRaisesRegexp(KeyError, msg): - plugin._get_bounds_of_distribution(cube) - - class Test_process(IrisTest): """Test the _create_cube_with_percentiles plugin.""" From a9e903495c855e5118b90cb40892f257af129040 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 18 May 2017 07:40:05 +0100 Subject: [PATCH 0111/1367] First version of the unit tests required for ResamplePercentiles, although, these are untested and largely copies from GeneratePercentilesFromProbabilities. As ResamplePercentiles and GeneratePercentilesFromProbabilities, some refactoring will need to be considered before finalising unit tests. --- .../ensemble_copula_coupling.py | 30 +- ...ng_GeneratePercentilesFromProbabilities.py | 18 +- ...ble_copula_coupling_ResamplePercentiles.py | 441 ++++++++++++++++++ 3 files changed, 470 insertions(+), 19 deletions(-) create mode 100644 lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index d1d168408c..960449b863 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -66,8 +66,8 @@ def __init__(self): """ pass - def _add_bounds_to_percentiles_and_forecast_values( - self, percentiles, forecast_values, bounds_pairing): + def _add_bounds_to_percentiles_and_forecast_at_percentiles( + self, percentiles, forecast_at_percentiles, bounds_pairing): """ Padding of the lower and upper bounds of the percentiles for a given phenomenon, and padding of forecast values using the @@ -77,7 +77,7 @@ def _add_bounds_to_percentiles_and_forecast_values( ---------- percentiles : Numpy array Array of percentiles from a Cumulative Distribution Function. - forecast_values : Numpy array + forecast_at_percentiles : Numpy array Array containing the underlying forecast values at each percentile. bounds_pairing : Tuple Lower and upper bound to be used as the ends of the @@ -86,25 +86,27 @@ def _add_bounds_to_percentiles_and_forecast_values( ------- percentiles : Numpy array Array of percentiles from a Cumulative Distribution Function. - forecast_values : Numpy array + forecast_at_percentiles : Numpy array Array containing the underlying forecast values at each percentile. """ lower_bound, upper_bound = bounds_pairing percentiles = np.insert(percentiles, 0, 0) percentiles = np.append(percentiles, 1) - lower_array = np.full((forecast_values.shape[0], 1), lower_bound) - upper_array = np.full((forecast_values.shape[0], 1), upper_bound) - forecast_values = np.concatenate( - (lower_array, forecast_values, upper_array), axis=1) - if np.any(np.diff(forecast_values) < 0): - msg = ("The end points added to the forecast values " - "representing for each percentile must result in " + lower_array = ( + np.full((forecast_at_percentiles.shape[0], 1), lower_bound)) + upper_array = ( + np.full((forecast_at_percentiles.shape[0], 1), upper_bound)) + forecast_at_percentiles = np.concatenate( + (lower_array, forecast_at_percentiles, upper_array), axis=1) + if np.any(np.diff(forecast_at_percentiles) < 0): + msg = ("The end points added to the forecast at percentiles " + "values representing for each percentile must result in " "an ascending order. 
" "In this case, the forecast values {} must be outside the " "allowable range given by the bounds {}".format( - forecast_values, bounds_pairing)) + forecast_at_percentiles, bounds_pairing)) raise ValueError(msg) - return percentiles, forecast_values + return percentiles, forecast_at_percentiles def _sample_percentiles( self, forecast_at_percentiles, desired_percentiles): @@ -135,7 +137,7 @@ def _sample_percentiles( forecast_at_percentiles, coord="percentiles") original_percentiles, forecast_at_reshaped_percentiles = ( - _add_bounds_to_percentiles_and_forecast_values( + _add_bounds_to_percentiles_and_forecast_at_percentiles( original_percentiles, forecast_at_reshaped_percentiles, bounds_pairing)) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 449f8781d6..0e0a9247f9 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -30,8 +30,7 @@ # POSSIBILITY OF SUCH DAMAGE. """ Unit tests for the -`plugins_ensemble_copula_coupling.GeneratePercentilesFromProbabilities` -class. +`ensemble_copula_coupling.GeneratePercentilesFromProbabilities` class. """ import numpy as np @@ -130,7 +129,10 @@ def set_up_spot_temperature_cube(): class Test__add_bounds_to_thresholds_and_probabilities(IrisTest): - """Test the _add_bounds_to_thresholds_and_probabilities plugin.""" + """ + Test the _add_bounds_to_thresholds_and_probabilities method of the + GeneratePercentilesFromProbabilities. + """ def setUp(self): self.current_temperature_forecast_cube = ( @@ -201,7 +203,10 @@ def test_endpoints_of_distribution_exceeded(self): class Test__probabilities_to_percentiles(IrisTest): - """Test the _create_cube_with_percentiles plugin.""" + """ + Test the _probabilities_to_percentiles method of the + GeneratePercentilesFromProbabilities plugin. + """ def setUp(self): """Set up temperature cube.""" @@ -463,7 +468,10 @@ def test_check_data_spot_forecasts(self): class Test_process(IrisTest): - """Test the _create_cube_with_percentiles plugin.""" + """ + Test the process method of the GeneratePercentilesFromProbabilities + plugin. + """ def setUp(self): """Set up temperature cube.""" diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py new file mode 100644 index 0000000000..c20aa84186 --- /dev/null +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -0,0 +1,441 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +""" +Unit tests for the `ensemble_copula_coupling.ResamplePercentiles` class. +""" + +class Test__add_bounds_to_percentiles_and_forecast_values(IrisTest): + + """ + Test the _add_bounds_to_percentiles_and_forecast_values method of the + ResamplePercentiles plugin. + """ + + def setUp(self): + data = np.tile(np.linspace(5, 10, 9), 3).reshape(3, 1, 3, 3) + data[0] -= 1 + data[1] += 1 + data[2] += 3 + cube = set_up_cube(data, "air_temperature", "degreesC") + self.realization_cube = ( + _add_forecast_reference_time_and_forecast_period(cube.copy())) + cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) + self.percentile_cube = ( + _add_forecast_reference_time_and_forecast_period(cube)) + + def test_basic(self): + """Test that the plugin returns two numpy arrays.""" + cube = self.percentile_cube + percentiles = cube.coord("percentile").points + forecast_at_percentiles = cube.data.reshape(3, 9) + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( + percentiles, forecast_at_percentiles, bounds_pairing) + self.assertIsInstance(result[0], np.ndarray) + self.assertIsInstance(result[1], np.ndarray) + + def test_bounds_of_percentiles(self): + """ + Test that the plugin returns the expected results for the + percentiles, where they've been padded with the values from + the bounds_pairing. + """ + cube = self.percentile_cube + percentiles = cube.coord("percentile").points + forecast_at_percentiles = cube.data.reshape(3, 9) + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( + percentiles, forecast_at_percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result[0][0], bounds_pairing[0]) + self.assertArrayAlmostEqual(result[0][-1], bounds_pairing[1]) + + def test_probability_data(self): + """ + Test that the plugin returns the expected results for the + probabilities, where they've been padded with zeros and ones to + represent the extreme ends of the Cumulative Distribution Function. 
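+
+ As an illustrative sketch (these values are not the test data):
+ percentiles of [0.1, 0.5, 0.9] are padded to [0, 0.1, 0.5, 0.9, 1],
+ and each row of forecast values is padded with the lower and upper
+ value of the bounds_pairing.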
+ """ + cube = self.percentile_cube + percentiles = cube.coord("percentile").points + forecast_at_percentiles = cube.data.reshape(3, 9) + zero_array = np.zeros(forecast_at_percentiles[:, 0].shape) + one_array = np.ones(forecast_at_percentiles[:, 0].shape) + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( + percentiles, forecast_at_percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result[1][:, 0], zero_array) + self.assertArrayAlmostEqual(result[1][:, -1], one_array) + + def test_endpoints_of_distribution_exceeded(self): + """ + Test that the plugin raises a ValueError when the constant + end points of the distribution are exceeded by a threshold value + used in the forecast. + """ + forecast_at_percentiles = np.array([[0.05, 0.7, 0.95]]) + percentiles = np.array([8, 10, 60]) + bounds_pairing = (-40, 50) + plugin = Plugin() + msg = "The end points added to the threshold values for" + with self.assertRaisesRegexp(ValueError, msg): + plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( + percentiles, forecast_at_percentiles, bounds_pairing) + + +class Test__sample_percentiles(IrisTest): + + """ + Test the _sample_percentiles method of the ResamplePercentiles plugin. + """ + + def setUp(self): + data = np.tile(np.linspace(5, 10, 9), 3).reshape(3, 1, 3, 3) + data[0] -= 1 + data[1] += 1 + data[2] += 3 + cube = set_up_cube(data, "air_temperature", "degreesC") + self.realization_cube = ( + _add_forecast_reference_time_and_forecast_period(cube.copy())) + cube.coord("realization").rename("percentile") + self.percentile_cube = ( + _add_forecast_reference_time_and_forecast_period(cube)) + + def test_basic(self): + """Test that the plugin returns an Iris.cube.Cube.""" + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertIsInstance(result, Cube) + + def test_simple_check_data(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles. + + The input cube contains probabilities greater than a given threshold. + """ + expected = np.array([8.15384615, 9.38461538, 11.6]) + expected = expected[:, np.newaxis, np.newaxis, np.newaxis] + + data = np.array([0.95, 0.3, 0.05]) + data = data[:, np.newaxis, np.newaxis, np.newaxis] + + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube( + data, "air_temperature", "1", + forecast_thresholds=[8, 10, 12], y_dimension_length=1, + x_dimension_length=1))) + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, expected) + + def test_probabilities_not_monotonically_increasing(self): + """ + Test that the plugin raises a ValueError when the probabilities + of the Cumulative Distribution Function are not monotonically + increasing. 
+ """ + data = np.array([0.05, 0.7, 0.95]) + data = data[:, np.newaxis, np.newaxis, np.newaxis] + + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube( + data, "air_temperature", "1", + forecast_thresholds=[8, 10, 12], y_dimension_length=1, + x_dimension_length=1))) + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + msg = "The probability values used to construct the" + with self.assertRaisesRegexp(ValueError, msg): + plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + + def test_result_cube_has_no_probability_above_threshold_coordinate(self): + """ + Test that the plugin returns a cube with coordinates that + do not include the probability_above_threshold coordinate. + """ + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + for coord in result.coords(): + self.assertNotEqual(coord.name(), "probability_above_threshold") + + def test_check_data(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles. + """ + data = np.array([[[[15.8, 31., 46.2], + [8., 10., 31.], + [10.4, 12., 42.4]]], + [[[-16., 10, 31.], + [8., 10., 11.6], + [-30.4, 8., 12.]]], + [[[-30.4, 8., 11.], + [-34., -10., 9], + [-35.2, -16., 3.2]]]]) + + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + def test_check_single_threshold(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles, if a single threshold is used for + constructing the percentiles. + """ + data = np.array([[[[12.2, 29., 45.8], + [8., 26.66666667, 45.33333333], + [12.2, 29., 45.8]]], + [[[-16., 23.75, 44.75], + [8., 26.66666667, 45.33333333], + [-30.4, 8., 41.6]]], + [[[-30.4, 8., 41.6], + [-34., -10., 29.], + [-35.2, -16., 3.2]]]]) + + for acube in self.current_temperature_forecast_cube.slices_over( + "probability_above_threshold"): + cube = acube + break + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + def test_lots_of_probability_thresholds(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles, if there are lots of thresholds. 
+ """ + input_probs_1d = np.linspace(1, 0, 30) + input_probs = np.tile(input_probs_1d, (3, 3, 1, 1)).T + + data = np.array([[[[2.9, 14.5, 26.1], + [2.9, 14.5, 26.1], + [2.9, 14.5, 26.1]]], + [[[2.9, 14.5, 26.1], + [2.9, 14.5, 26.1], + [2.9, 14.5, 26.1]]], + [[[2.9, 14.5, 26.1], + [2.9, 14.5, 26.1], + [2.9, 14.5, 26.1]]]]) + + temperature_values = np.arange(0, 30) + cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube(input_probs, "air_temperature", "1", + forecast_thresholds=temperature_values))) + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + def test_lots_of_percentiles(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles, if lots of percentile values are + requested. + """ + data = np.array([[[[13.9, 15.8, 17.7], + [19.6, 21.5, 23.4], + [25.3, 27.2, 29.1]]], + [[[31., 32.9, 34.8], + [36.7, 38.6, 40.5], + [42.4, 44.3, 46.2]]], + [[[48.1, -16., 8.], + [8.25, 8.5, 8.75], + [9., 9.25, 9.5]]], + [[[9.75, 10., 10.33333333], + [10.66666667, 11., 11.33333333], + [11.66666667, 12., 21.5]]], + [[[31., 40.5, 10.2], + [10.4, 10.6, 10.8], + [11., 11.2, 11.4]]], + [[[11.6, 11.8, 12.], + [15.8, 19.6, 23.4], + [27.2, 31., 34.8]]], + [[[38.6, 42.4, 46.2], + [-28., -16., -4.], + [8., 8.33333333, 8.66666667]]], + [[[9., 9.33333333, 9.66666667], + [10., 10.33333333, 10.66666667], + [11., 11.33333333, 11.66666667]]], + [[[12., 21.5, 31.], + [40.5, -16., 8.], + [8.25, 8.5, 8.75]]], + [[[9., 9.25, 9.5], + [9.75, 10., 10.2], + [10.4, 10.6, 10.8]]], + [[[11., 11.2, 11.4], + [11.6, 11.8, -35.2], + [-30.4, -25.6, -20.8]]], + [[[-16., -11.2, -6.4], + [-1.6, 3.2, 8.], + [8.5, 9., 9.5]]], + [[[10., 10.5, 11.], + [11.5, 12., 31.], + [-35.2, -30.4, -25.6]]], + [[[-20.8, -16., -11.2], + [-6.4, -1.6, 3.2], + [8., 8.33333333, 8.66666667]]], + [[[9., 9.33333333, 9.66666667], + [10., 10.5, 11.], + [11.5, -37., -34.]]], + [[[-31., -28., -25.], + [-22., -19., -16.], + [-13., -10., -7.]]], + [[[-4., -1., 2.], + [5., 8., 8.5], + [9., 9.5, -37.6]]], + [[[-35.2, -32.8, -30.4], + [-28., -25.6, -23.2], + [-20.8, -18.4, -16.]]], + [[[-13.6, -11.2, -8.8], + [-6.4, -4., -1.6], + [0.8, 3.2, 5.6]]]]) + cube = self.current_temperature_forecast_cube + percentiles = np.arange(0.05, 1.0, 0.05) + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + def test_check_data_spot_forecasts(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles for spot forecasts. 
+ """ + data = np.array([[[15.8, 31., 46.2, + 8., 10., 31., + 10.4, 12., 42.4]], + [[-16., 10, 31., + 8., 10., 11.6, + -30.4, 8., 12.]], + [[-30.4, 8., 11., + -34., -10., 9, + -35.2, -16., 3.2]]]) + cube = self.current_temperature_spot_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, data) + + +class Test_process(IrisTest): + + """Test the process plugin of the Resample Percentiles plugin.""" + + def setUp(self): + data = np.tile(np.linspace(5, 10, 9), 3).reshape(3, 1, 3, 3) + data[0] -= 1 + data[1] += 1 + data[2] += 3 + cube = set_up_cube(data, "air_temperature", "degreesC") + self.realization_cube = ( + _add_forecast_reference_time_and_forecast_period(cube.copy())) + cube.coord("realization").rename("percentile") + self.percentile_cube = ( + _add_forecast_reference_time_and_forecast_period(cube)) + + def test_check_data_specifying_percentiles(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for a specific number of percentiles. + """ + data = np.array([[[[21.5, 31., 40.5], + [8.75, 10., 11.66666667], + [11., 12., 31.]]], + [[[8.33333333, 10., 11.66666667], + [8.75, 10., 11.], + [-16., 8., 10.5]]], + [[[-16., 8., 9.66666667], + [-25., -10., 5.], + [-28., -16., -4.]]]]) + + cube = self.percentile_cube + percentiles = [0.25, 0.5, 0.75] + plugin = Plugin() + result = plugin.process( + cube, no_of_percentiles=len(percentiles)) + self.assertArrayAlmostEqual(result.data, data) + + def test_check_data_not_specifying_percentiles(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values without specifying the number of percentiles. + """ + data = np.array([[[[21.5, 31., 40.5], + [8.75, 10., 11.66666667], + [11., 12., 31.]]], + [[[8.33333333, 10., 11.66666667], + [8.75, 10., 11.], + [-16., 8., 10.5]]], + [[[-16., 8., 9.66666667], + [-25., -10., 5.], + [-28., -16., -4.]]]]) + + cube = self.percentile_cube + plugin = Plugin() + result = plugin.process(cube) + self.assertArrayAlmostEqual(result.data, data) + From 30e50b1306aba663fc88199440a085c6f37dc4cd Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Thu, 18 May 2017 08:48:53 +0100 Subject: [PATCH 0112/1367] Latest refactoring to try to reduce code duplication between plugins. 
--- .../ensemble_copula_coupling.py | 49 ++++++++++--------- .../ensemble_copula_coupling_utilities.py | 44 +++++++++++++++++ 2 files changed, 69 insertions(+), 24 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 960449b863..167b4f4ea5 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -44,9 +44,10 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ import bounds_for_ecdf, units_of_bounds_for_ecdf from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ - import (add_bounds_to_thresholds_and_probabilities, + import (concatenate_2d_array_with_2darray_endpoints, create_cube_with_percentiles, create_percentiles, - get_bounds_of_distribution) + get_bounds_of_distribution, + insert_lower_and_upper_endpoint_to_1d_array) class ResamplePercentiles(object): @@ -90,26 +91,23 @@ def _add_bounds_to_percentiles_and_forecast_at_percentiles( Array containing the underlying forecast values at each percentile. """ lower_bound, upper_bound = bounds_pairing - percentiles = np.insert(percentiles, 0, 0) - percentiles = np.append(percentiles, 1) - lower_array = ( - np.full((forecast_at_percentiles.shape[0], 1), lower_bound)) - upper_array = ( - np.full((forecast_at_percentiles.shape[0], 1), upper_bound)) - forecast_at_percentiles = np.concatenate( - (lower_array, forecast_at_percentiles, upper_array), axis=1) + percentiles = insert_lower_and_upper_endpoint_to_1d_array( + percentiles, percentiles, 0, 1) + forecast_at_percentiles = concatenate_2d_array_with_2darray_endpoints( + forecast_at_percentiles, lower_bound, upper_bound) if np.any(np.diff(forecast_at_percentiles) < 0): msg = ("The end points added to the forecast at percentiles " "values representing for each percentile must result in " "an ascending order. " - "In this case, the forecast values {} must be outside the " - "allowable range given by the bounds {}".format( + "In this case, the forecast at percentile values {} " + "must be outside the allowable range given by the " + "bounds {}".format( forecast_at_percentiles, bounds_pairing)) raise ValueError(msg) return percentiles, forecast_at_percentiles def _sample_percentiles( - self, forecast_at_percentiles, desired_percentiles): + self, forecast_at_percentiles, desired_percentiles, bounds_pairing): """ Interpolation of forecast for a set of percentiles from an initial set of percentiles to a new set of percentiles. This is constructed @@ -122,6 +120,9 @@ def _sample_percentiles( Cube or CubeList expected to contain a percentile coordinate. desired_percentiles : Numpy array Array of the desired percentiles. + bounds_pairing : Tuple + Lower and upper bound to be used as the ends of the + empirical cumulative distribution function. Returns ------- @@ -251,6 +252,7 @@ def _add_bounds_to_thresholds_and_probabilities( Padding of the lower and upper bounds of the distribution for a given phenomenon for the threshold_points, and padding of probabilities of 0 and 1 to the forecast probabilities. + Parameters ---------- threshold_points : Numpy array @@ -271,19 +273,18 @@ def _add_bounds_to_thresholds_and_probabilities( Array containing the probabilities padded with 0 and 1 at each end. 
""" lower_bound, upper_bound = bounds_pairing - threshold_points = np.insert(threshold_points, 0, lower_bound) - threshold_points = np.append(threshold_points, upper_bound) - zeroes_array = np.zeros((probabilities_for_cdf.shape[0], 1)) - ones_array = np.ones((probabilities_for_cdf.shape[0], 1)) - probabilities_for_cdf = np.concatenate( - (zeroes_array, probabilities_for_cdf, ones_array), axis=1) + threshold_points = insert_lower_and_upper_endpoint_to_1d_array( + threshold_points, lower_bound, upper_bound) + probabilities_for_cdf = concatenate_2d_array_with_2darray_endpoints( + probabilities_for_cdf, 0, 1) if np.any(np.diff(threshold_points) < 0): msg = ("The end points added to the threshold values for " - "constructing the Cumulative Distribution Function (CDF) " - "must result in an ascending order. " - "In this case, the threshold points {} must be outside the " - "allowable range given by the bounds {}".format( - threshold_points, bounds_pairing)) + "constructing the Cumulative Distribution Function (CDF) " + "must result in an ascending order. " + "In this case, the threshold points {} must be " + "outside the allowable range given by the " + "bounds {}".format( + threshold_points, bounds_pairing)) raise ValueError(msg) return threshold_points, probabilities_for_cdf diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index f1080a63e1..d233441d9d 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -41,6 +41,50 @@ import iris +def insert_lower_and_upper_endpoint_to_1d_array( + array_1d, low_endpoint, high_endpoint): + """ + For a 1d array, add a lower and upper endpoint. + + Parameters + ---------- + array_1d : Numpy array + 1d array of values + low_endpoint : Number + Number of use as the lower endpoint. + high_endpoint : Number + Number of use as the upper endpoint. + """ + percentiles = np.insert(percentiles, 0, low_endpoint) + percentiles = np.append(percentiles, high_endpoint) + return percentiles + + +def concatenate_2d_array_with_2darray_endpoints( + array_2d, low_endpoint, high_endpoint): + """ + For a 2d array, add a 2d array as the lower and upper endpoints. + + Parameters + ---------- + array_2d : Numpy array + 2d array of values + low_endpoint : Number + Number of used to create a 2d array of a constant value + as the lower endpoint. + high_endpoint : Number + Number of used to create a 2d array of a constant value + as the upper endpoint. + """ + lower_array = ( + np.full((array_2d.shape[0], 1), low_endpoint)) + upper_array = ( + np.full((array_2d.shape[0], 1), high_endpoint)) + array_2d = np.concatenate( + (lower_array, array_2d, upper_array), axis=1) + return array_2d + + def create_percentiles(no_of_percentiles, sampling="quantile"): """ Function to create percentiles. From c47e0cab4672d940dcdacb0d546b66f21fbd15cb Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 23 May 2017 08:48:02 +0100 Subject: [PATCH 0113/1367] Latest changes following refactoring to pull out utilities and and correct tests following previous commit. 
--- .../ensemble_copula_coupling.py | 117 +++-- .../ensemble_copula_coupling_utilities.py | 104 +++-- .../helper_functions_ensemble_calibration.py | 93 +++- ...oupling_EnsembleCopulaCouplingUtilities.py | 173 +++++++- ...ng_GeneratePercentilesFromProbabilities.py | 101 +---- ...ble_copula_coupling_ResamplePercentiles.py | 398 +++++++++--------- 6 files changed, 612 insertions(+), 374 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 167b4f4ea5..31400e0246 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -37,6 +37,7 @@ import iris +from iris.exceptions import CoordinateNotFoundError import cf_units as unit from improver.ensemble_calibration.ensemble_calibration_utilities import ( @@ -44,10 +45,48 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ import bounds_for_ecdf, units_of_bounds_for_ecdf from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ - import (concatenate_2d_array_with_2darray_endpoints, + import (concatenate_2d_array_with_2d_array_endpoints, create_cube_with_percentiles, create_percentiles, get_bounds_of_distribution, - insert_lower_and_upper_endpoint_to_1d_array) + insert_lower_and_upper_endpoint_to_1d_array, + reshape_array_to_have_probabilistic_dimension_at_the_front) + + +class RebadgePercentilesAsMembers(object): + """ + Class to rebadge percentiles as ensemble realizations. + This will allow the quantisation to percentiles to be completed, without + a subsequent EnsembleReordering step to restore spatial correlations, + if required. + """ + def __init__(self): + """ + Initialise the class. + """ + pass + + def process(self, cube): + """ + Rebadge percentiles as ensemble members. The ensemble member numbering + will depend upon the number of percentiles in the input cube i.e. + 0, 1, 2, 3, ..., n, if there are n percentiles. + + Parameters + ---------- + cube : Iris.cube.Cube + Cube containing a percentile coordinate, which will be rebadged as + ensemble member. + + """ + if not cube.coords("percentile"): + msg = ("The percentile coordinate could not be found within" + "the input cube: {}.".format(cube)) + raise CoordinateNotFoundError(msg) + + plen = len(cube.coord("percentile").points) + cube.coord("percentile").points = np.arange(plen) + cube.coord("percentile").rename("realization") + return cube class ResamplePercentiles(object): @@ -92,8 +131,8 @@ def _add_bounds_to_percentiles_and_forecast_at_percentiles( """ lower_bound, upper_bound = bounds_pairing percentiles = insert_lower_and_upper_endpoint_to_1d_array( - percentiles, percentiles, 0, 1) - forecast_at_percentiles = concatenate_2d_array_with_2darray_endpoints( + percentiles, 0, 1) + forecast_at_percentiles = concatenate_2d_array_with_2d_array_endpoints( forecast_at_percentiles, lower_bound, upper_bound) if np.any(np.diff(forecast_at_percentiles) < 0): msg = ("The end points added to the forecast at percentiles " @@ -104,10 +143,15 @@ def _add_bounds_to_percentiles_and_forecast_at_percentiles( "bounds {}".format( forecast_at_percentiles, bounds_pairing)) raise ValueError(msg) + if np.any(np.diff(percentiles) < 0): + msg = ("The percentiles must be in ascending order." 
+ "The input percentiles were {}".format(percentiles)) + raise ValueError(msg) return percentiles, forecast_at_percentiles def _sample_percentiles( - self, forecast_at_percentiles, desired_percentiles, bounds_pairing): + self, forecast_at_percentiles, desired_percentiles, + bounds_pairing): """ Interpolation of forecast for a set of percentiles from an initial set of percentiles to a new set of percentiles. This is constructed @@ -132,44 +176,38 @@ def _sample_percentiles( """ original_percentiles = ( - forecast_at_percentiles.coord("percentiles").points) + forecast_at_percentiles.coord("percentile").points) forecast_at_reshaped_percentiles = convert_cube_data_to_2d( - forecast_at_percentiles, coord="percentiles") + forecast_at_percentiles, coord="percentile") original_percentiles, forecast_at_reshaped_percentiles = ( - _add_bounds_to_percentiles_and_forecast_at_percentiles( + self._add_bounds_to_percentiles_and_forecast_at_percentiles( original_percentiles, forecast_at_reshaped_percentiles, bounds_pairing)) forecast_at_interpolated_percentiles = ( np.empty( (forecast_at_reshaped_percentiles.shape[0], - len(original_percentiles)))) + len(desired_percentiles)))) for index in range(forecast_at_reshaped_percentiles.shape[0]): forecast_at_interpolated_percentiles[index, :] = np.interp( - desired_percentiles, - forecast_at_reshaped_percentiles[index, :], - originial_percentiles) + desired_percentiles, original_percentiles, + forecast_at_reshaped_percentiles[index, :]) # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. - shape_to_reshape_to = list(forecast_at_percentiles.shape) - if forecast_at_percentiles.coord_dims("percentiles"): - pat_coord_position = ( - forecast_at_percentiles.coord_dims("percentiles")) - shape_to_reshape_to.pop(pat_coord_position[0]) - shape_to_reshape_to = [len(desired_percentiles)] + shape_to_reshape_to - - forecast_at_percentiles = ( - forecast_at_percentiles.reshape(shape_to_reshape_to)) + forecast_at_percentiles_data = ( + reshape_array_to_have_probabilistic_dimension_at_the_front( + forecast_at_interpolated_percentiles, forecast_at_percentiles, + "percentile", len(desired_percentiles))) for template_cube in forecast_at_percentiles.slices_over( - "percentiles"): - template_cube.remove_coord("percentiles") + "percentile"): + template_cube.remove_coord("percentile") break percentile_cube = create_cube_with_percentiles( - desired_percentiles, template_cube, forecast_at_percentiles) + desired_percentiles, template_cube, forecast_at_percentiles_data) percentile_cube.cell_methods = {} return percentile_cube @@ -210,13 +248,15 @@ def process(self, forecast_at_percentiles, no_of_percentiles=None, if no_of_percentiles is None: no_of_percentiles = ( - len(forecast_at_percentiles.coord("percentiles").points)) + len(forecast_at_percentiles.coord("percentile").points)) percentiles = create_percentiles( no_of_percentiles, sampling=sampling) + cube_units = forecast_at_percentiles.units bounds_pairing = ( - get_bounds_of_distribution(forecast_at_percentiles, "percentiles")) + get_bounds_of_distribution( + forecast_at_percentiles.name(), cube_units)) forecast_at_percentiles = self._sample_percentiles( forecast_at_percentiles, percentiles, bounds_pairing) @@ -275,7 +315,7 @@ def _add_bounds_to_thresholds_and_probabilities( lower_bound, upper_bound = bounds_pairing threshold_points = insert_lower_and_upper_endpoint_to_1d_array( threshold_points, lower_bound, upper_bound) - probabilities_for_cdf = 
concatenate_2d_array_with_2darray_endpoints( + probabilities_for_cdf = concatenate_2d_array_with_2d_array_endpoints( probabilities_for_cdf, 0, 1) if np.any(np.diff(threshold_points) < 0): msg = ("The end points added to the threshold values for " @@ -332,7 +372,7 @@ def _probabilities_to_percentiles( raise ValueError(msg) threshold_points, probabilities_for_cdf = ( - _add_bounds_to_thresholds_and_probabilities( + self._add_bounds_to_thresholds_and_probabilities( threshold_points, probabilities_for_cdf, bounds_pairing)) forecast_at_percentiles = ( @@ -344,16 +384,10 @@ def _probabilities_to_percentiles( # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. - shape_to_reshape_to = list(forecast_probabilities.shape) - if forecast_probabilities.coord_dims("probability_above_threshold"): - pat_coord_position = ( - forecast_probabilities.coord_dims( - "probability_above_threshold")) - shape_to_reshape_to.pop(pat_coord_position[0]) - shape_to_reshape_to = [len(percentiles)] + shape_to_reshape_to - forecast_at_percentiles = ( - forecast_at_percentiles.reshape(shape_to_reshape_to)) + reshape_array_to_have_probabilistic_dimension_at_the_front( + forecast_at_percentiles, forecast_probabilities, + "probability_above_threshold", len(percentiles))) for template_cube in forecast_probabilities.slices_over( "probability_above_threshold"): @@ -409,9 +443,11 @@ def process(self, forecast_probabilities, no_of_percentiles=None, percentiles = create_percentiles( no_of_percentiles, sampling=sampling) + cube_units = ( + forecast_probabilities.coord("probability_above_threshold").units) bounds_pairing = ( get_bounds_of_distribution( - forecast_probabilities, "probability_above_threshold")) + forecast_probabilities.name(), cube_units)) forecast_at_percentiles = self._probabilities_to_percentiles( forecast_probabilities, percentiles, bounds_pairing) @@ -497,6 +533,11 @@ def _mean_and_variance_to_percentiles( # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. + result = ( + reshape_array_to_have_probabilistic_dimension_at_the_front( + result, calibrated_forecast_predictor, + "realization", len(percentiles))) + shape_to_reshape_to = list(calibrated_forecast_predictor.shape) if calibrated_forecast_predictor.coord_dims("realization"): realization_coord_position = ( diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index d233441d9d..09e4cfecba 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -39,28 +39,13 @@ import cf_units as unit import iris +from iris.exceptions import CoordinateNotFoundError +from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ + import bounds_for_ecdf, units_of_bounds_for_ecdf -def insert_lower_and_upper_endpoint_to_1d_array( - array_1d, low_endpoint, high_endpoint): - """ - For a 1d array, add a lower and upper endpoint. - Parameters - ---------- - array_1d : Numpy array - 1d array of values - low_endpoint : Number - Number of use as the lower endpoint. - high_endpoint : Number - Number of use as the upper endpoint. 
-    """
-    percentiles = np.insert(percentiles, 0, low_endpoint)
-    percentiles = np.append(percentiles, high_endpoint)
-    return percentiles
-
-
-def concatenate_2d_array_with_2darray_endpoints(
+def concatenate_2d_array_with_2d_array_endpoints(
         array_2d, low_endpoint, high_endpoint):
     """
     For a 2d array, add a 2d array as the lower and upper endpoints.
@@ -193,7 +178,7 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data):
     return result
 
 
-def get_bounds_of_distribution(forecast_cube, coord_for_units):
+def get_bounds_of_distribution(cube_name, cube_units):
     """
     Gets the bounds of the distribution and converts the units of the
     bounds_pairing to the units of the forecast.
@@ -205,10 +190,11 @@
 
     Parameters
     ----------
-    forecast_cube : Iris Cube
-        Input cube containing the coordinate from which the units
-        of the bounds_pairing should be converted to.
-        coordinate.
+    cube_name : String
+        Name of cube, which is used as the key for the bounds_for_ecdf and
+        units_of_bounds_for_ecdf dictionaries.
+    cube_units : cf_units.Unit
+        Units to which the bounds_pairing will be converted.
 
     Returns
     -------
@@ -218,22 +204,82 @@
         the same units as the input cube.
 
     """
-    cube_units = (
-        forecast_cube.coord(coord_for_units).units)
     # Extract bounds from dictionary of constants.
     try:
-        bounds_pairing = bounds_for_ecdf[forecast_cube.name()]
+        bounds_pairing = bounds_for_ecdf[cube_name]
         bounds_pairing_units = (
-            units_of_bounds_for_ecdf[forecast_cube.name()])
+            units_of_bounds_for_ecdf[cube_name])
     except KeyError as err:
         msg = ("The forecast_cube name: {} is not recognised"
               "within bounds_for_ecdf {} or "
              "units_of_bounds_for_ecdf: {}. \n"
              "Error: {}".format(
-                   forecast_cube.name(), bounds_for_ecdf,
+                   cube_name, bounds_for_ecdf,
                    units_of_bounds_for_ecdf, err))
        raise KeyError(msg)
    bounds_pairing_units = unit.Unit(bounds_pairing_units)
    bounds_pairing = bounds_pairing_units.convert(
        np.array(bounds_pairing), cube_units)
    return bounds_pairing
+
+
+def insert_lower_and_upper_endpoint_to_1d_array(
+        array_1d, low_endpoint, high_endpoint):
+    """
+    For a 1d array, add a lower and upper endpoint.
+
+    Parameters
+    ----------
+    array_1d : Numpy array
+        1d array of values
+    low_endpoint : Number
+        Number to use as the lower endpoint.
+    high_endpoint : Number
+        Number to use as the upper endpoint.
+    """
+    array_1d = np.insert(array_1d, 0, low_endpoint)
+    array_1d = np.append(array_1d, high_endpoint)
+    return array_1d
+
+
+def reshape_array_to_have_probabilistic_dimension_at_the_front(
+        array_to_reshape, original_cube, input_probabilistic_dimension_name,
+        output_probabilistic_dimension_length):
+    """
+    Reshape a 2d array, so the ensemble or probabilistic dimension
+    e.g. percentile, or probability is first, and any other dimension
+    coordinates follow.
+
+    Parameters
+    ----------
+    array_to_reshape : Numpy array
+        The array that requires reshaping.
+    original_cube : Iris.cube.Cube
+        Cube containing the desired shape to be reshaped to, apart from the
+        ensemble dimension, for example,
+        [ensemble_dimension, time, y, x].
+    input_probabilistic_dimension_name : String
+        Name of the dimension within the original cube, which represents the
+        probabilistic dimension.
+    output_probabilistic_dimension_length : Integer
+        Length of the probabilistic dimension, which will be used to create
+        the shape to which the array_to_reshape will be reshaped.
+ + """ + shape_to_reshape_to = list(original_cube.shape) + if original_cube.coords( + input_probabilistic_dimension_name, dim_coords=True): + pat_coord_position = ( + original_cube.coord_dims(input_probabilistic_dimension_name)) + shape_to_reshape_to.pop(pat_coord_position[0]) + elif original_cube.coords( + input_probabilistic_dimension_name, dim_coords=False): + pass + else: + msg = ("A {} coordinate is not available on the {} cube.".format( + input_probabilistic_dimension_name, original_cube)) + raise CoordinateNotFoundError(msg) + + shape_to_reshape_to = ( + [output_probabilistic_dimension_length] + shape_to_reshape_to) + return array_to_reshape.reshape(shape_to_reshape_to) diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 672ba5c99b..16ccf850be 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -43,8 +43,91 @@ concatenate_cubes) +def set_up_probability_above_threshold_cube( + data, phenomenon_standard_name, phenomenon_units, + forecast_thresholds=[8, 10, 12], + y_dimension_length=3, x_dimension_length=3): + """Create a cube containing multiple realizations.""" + cube = Cube(data, standard_name=phenomenon_standard_name, + units=phenomenon_units) + cube.add_dim_coord( + DimCoord(forecast_thresholds, + long_name='probability_above_threshold', units='degreesC'), 0) + time_origin = "hours since 1970-01-01 00:00:00" + calendar = "gregorian" + tunit = Unit(time_origin, calendar) + cube.add_dim_coord(DimCoord([402192.5], + "time", units=tunit), 1) + cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, y_dimension_length), + 'latitude', units='degrees'), 2) + cube.add_dim_coord(DimCoord(np.linspace(120, 180, x_dimension_length), + 'longitude', units='degrees'), 3) + return cube + + +def set_up_probability_above_threshold_temperature_cube(): + """Create a cube with metadata and values suitable for air temperature.""" + data = np.array([[[[1.0, 0.9, 1.0], + [0.8, 0.9, 0.5], + [0.5, 0.2, 0.0]]], + [[[1.0, 0.5, 1.0], + [0.5, 0.5, 0.3], + [0.2, 0.0, 0.0]]], + [[[1.0, 0.2, 0.5], + [0.2, 0.0, 0.1], + [0.0, 0.0, 0.0]]]]) + return ( + set_up_probability_above_threshold_cube(data, "air_temperature", "1")) + + +def set_up_probability_above_threshold_spot_cube( + data, phenomenon_standard_name, phenomenon_units, + forecast_thresholds=[8, 10, 12], + y_dimension_length=9, x_dimension_length=9): + """ + Create a cube containing multiple realizations, where one of the + dimensions is an index used for spot forecasts. + """ + cube = Cube(data, standard_name=phenomenon_standard_name, + units=phenomenon_units) + cube.add_dim_coord( + DimCoord(forecast_thresholds, + long_name='probability_above_threshold', units='degreesC'), 0) + time_origin = "hours since 1970-01-01 00:00:00" + calendar = "gregorian" + tunit = Unit(time_origin, calendar) + cube.add_dim_coord(DimCoord([402192.5], + "time", units=tunit), 1) + cube.add_dim_coord(DimCoord(np.arange(9), long_name='locnum', + units="1"), 2) + cube.add_aux_coord(AuxCoord(np.linspace(-45.0, 45.0, y_dimension_length), + 'latitude', units='degrees'), data_dims=2) + cube.add_aux_coord(AuxCoord(np.linspace(120, 180, x_dimension_length), + 'longitude', units='degrees'), data_dims=2) + return cube + + +def set_up_probability_above_threshold_spot_temperature_cube(): + """ + Create a cube with metadata and values suitable for air temperature + for spot forecasts. 
+    """
+    data = np.array([[[1.0, 0.9, 1.0,
+                       0.8, 0.9, 0.5,
+                       0.5, 0.2, 0.0]],
+                     [[1.0, 0.5, 1.0,
+                       0.5, 0.5, 0.3,
+                       0.2, 0.0, 0.0]],
+                     [[1.0, 0.2, 0.5,
+                       0.2, 0.0, 0.1,
+                       0.0, 0.0, 0.0]]])
+    return set_up_probability_above_threshold_spot_cube(
+        data, "air_temperature", "1")
+
+
 def set_up_cube(data, phenomenon_standard_name, phenomenon_units,
-                realizations=[0, 1, 2]):
+                realizations=[0, 1, 2], y_dimension_length=3,
+                x_dimension_length=3):
     """Create a cube containing multiple realizations."""
     cube = Cube(data, standard_name=phenomenon_standard_name,
                 units=phenomenon_units)
@@ -55,10 +138,10 @@ def set_up_cube(data, phenomenon_standard_name, phenomenon_units,
     tunit = Unit(time_origin, calendar)
     cube.add_dim_coord(DimCoord([402192.5],
                                 "time", units=tunit), 1)
-    cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, 3), 'latitude',
-                                units='degrees'), 2)
-    cube.add_dim_coord(DimCoord(np.linspace(120, 180, 3), 'longitude',
-                                units='degrees'), 3)
+    cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, y_dimension_length),
+                                'latitude', units='degrees'), 2)
+    cube.add_dim_coord(DimCoord(np.linspace(120, 180, x_dimension_length),
+                                'longitude', units='degrees'), 3)
     return cube
 
diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py
index bf80252f54..26c2ae60cc 100644
--- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py
+++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py
@@ -41,10 +41,161 @@
 import numpy as np
 
 from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \
-    import create_percentiles, create_cube_with_percentiles
+    import (create_percentiles, create_cube_with_percentiles,
+            insert_lower_and_upper_endpoint_to_1d_array,
+            concatenate_2d_array_with_2d_array_endpoints,
+            get_bounds_of_distribution,
+            reshape_array_to_have_probabilistic_dimension_at_the_front)
+from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \
+    import bounds_for_ecdf
 from improver.tests.helper_functions_ensemble_calibration import (
     set_up_temperature_cube, set_up_spot_temperature_cube,
-    _add_forecast_reference_time_and_forecast_period)
+    _add_forecast_reference_time_and_forecast_period,
+    set_up_probability_above_threshold_temperature_cube)
+
+
+class Test_reshape_array_to_have_probabilistic_dimension_at_the_front(
+    IrisTest):
+
+    """Test reshape_array_to_have_probabilistic_dimension_at_the_front."""
+
+    def setUp(self):
+        """Set up temperature cube."""
+        cube = (
+            _add_forecast_reference_time_and_forecast_period(
+                set_up_temperature_cube()))
+        percentile_points = np.arange(len(cube.coord("realization").points))
+        cube.coord("realization").points = percentile_points
+        cube.coord("realization").rename("percentile")
+        self.current_temperature_forecast_cube = cube
+
+    def test_basic(self):
+        """
+        Basic test that the result is a numpy array.
+        """
+        cube = self.current_temperature_forecast_cube
+        input_array = cube.data
+        plen = len(cube.coord("percentile").points)
+        reshaped_array = (
+            reshape_array_to_have_probabilistic_dimension_at_the_front(
+                cube.data, cube, "percentile", plen))
+        self.assertIsInstance(reshaped_array, np.ndarray)
+
+    def test_size_of_array(self):
+        """
+        Test that the reshaped array has the expected shape.
+        """
+        cube = self.current_temperature_forecast_cube
+        input_array = cube.data
+        plen = len(cube.coord("percentile").points)
+        reshaped_array = (
+            reshape_array_to_have_probabilistic_dimension_at_the_front(
+                cube.data, cube, "percentile", plen))
+        self.assertEqual(reshaped_array.shape[0], plen)
+        self.assertEqual(reshaped_array.shape, (3, 1, 3, 3))
+
+    def test_percentile_is_not_a_dimension_coordinate(self):
+        """
+        Test that the array is reshaped correctly when the probabilistic
+        coordinate is a scalar coordinate rather than a dimension coordinate.
+        """
+        cube = self.current_temperature_forecast_cube
+        for cube_slice in cube.slices_over("percentile"):
+            break
+        input_array = cube_slice.data
+        plen = len(cube_slice.coord("percentile").points)
+        reshaped_array = (
+            reshape_array_to_have_probabilistic_dimension_at_the_front(
+                cube_slice.data, cube_slice, "percentile", plen))
+        self.assertEqual(reshaped_array.shape, (1, 1, 3, 3))
+
+    def test_missing_coordinate(self):
+        """
+        Test that an exception is raised when the named probabilistic
+        coordinate is not available on the cube.
+        """
+        cube = self.current_temperature_forecast_cube
+        input_array = cube.data
+        plen = len(cube.coord("percentile").points)
+        msg = "coordinate is not available"
+        with self.assertRaisesRegexp(CoordinateNotFoundError, msg):
+            reshape_array_to_have_probabilistic_dimension_at_the_front(
+                cube.data, cube, "nonsense", plen)
+
+
+class Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest):
+
+    """Test the insert_lower_and_upper_endpoint_to_1d_array."""
+
+    def test_basic(self):
+        """
+        Basic test that the result is a numpy array with the expected contents.
+        """
+        expected = [0, 0.2, 0.5, 0.8, 1]
+        percentiles = [0.2, 0.5, 0.8]
+        result = insert_lower_and_upper_endpoint_to_1d_array(
+            percentiles, 0, 1)
+        self.assertIsInstance(result, np.ndarray)
+        self.assertArrayAlmostEqual(result, expected)
+
+    def test_another_example(self):
+        """
+        Another basic test that the result is a numpy array with the
+        expected contents.
+        """
+        expected = [-100, -40, 200, 1000, 10000]
+        percentiles = [-40, 200, 1000]
+        result = insert_lower_and_upper_endpoint_to_1d_array(
+            percentiles, -100, 10000)
+        self.assertIsInstance(result, np.ndarray)
+        self.assertArrayAlmostEqual(result, expected)
+
+
+class Test_concatenate_2d_array_with_2d_array_endpoints(IrisTest):
+
+    """Test the concatenate_2d_array_with_2d_array_endpoints."""
+
+    def test_basic(self):
+        """
+        Basic test that the result is a numpy array with the expected contents.
+        """
+        expected = np.array([[0, 0.2, 0.5, 0.8, 1]])
+        percentiles = np.array([[0.2, 0.5, 0.8]])
+        result = concatenate_2d_array_with_2d_array_endpoints(
+            percentiles, 0, 1)
+        self.assertIsInstance(result, np.ndarray)
+        self.assertArrayAlmostEqual(result, expected)
+
+    def test_another_example(self):
+        """
+        Another basic test that the result is a numpy array with the
+        expected contents.
+        """
+        expected = np.array([[-100, -40, 200, 1000, 10000]])
+        percentiles = np.array([[-40, 200, 1000]])
+        result = concatenate_2d_array_with_2d_array_endpoints(
+            percentiles, -100, 10000)
+        self.assertIsInstance(result, np.ndarray)
+        self.assertArrayAlmostEqual(result, expected)
+
+    def test_1d_input(self):
+        """
+        Test that a 1d input array results in the expected error.
+        """
+        expected = np.array([-100, -40, 200, 1000, 10000])
+        percentiles = np.array([-40, 200, 1000])
+        msg = "all the input arrays must have same number of dimensions"
+        with self.assertRaisesRegexp(ValueError, msg):
+            concatenate_2d_array_with_2d_array_endpoints(
+                percentiles, -100, 10000)
+
+    def test_3d_input(self):
+        """
+        Test that a 3d input array results in the expected error.
+ """ + expected = np.array([[[-100, -40, 200, 1000, 10000]]]) + percentiles = np.array([[[-40, 200, 1000]]]) + msg = "all the input arrays must have same number of dimensions" + with self.assertRaisesRegexp(ValueError, msg): + concatenate_2d_array_with_2d_array_endpoints( + percentiles, -100, 10000) class Test_create_cube_with_percentiles(IrisTest): @@ -259,12 +410,13 @@ class Test_get_bounds_of_distribution(IrisTest): def setUp(self): self.current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) + set_up_probability_above_threshold_temperature_cube())) def test_basic(self): """Test that the result is a numpy array.""" cube = self.current_temperature_forecast_cube - result = get_bounds_of_distribution(cube) + cube_units = cube.coord("probability_above_threshold").units + result = get_bounds_of_distribution(cube.name(), cube_units) self.assertIsInstance(result, np.ndarray) def test_check_data(self): @@ -272,8 +424,10 @@ def test_check_data(self): Test that the expected results are returned for the bounds_pairing. """ cube = self.current_temperature_forecast_cube + cube_units = cube.coord("probability_above_threshold").units bounds_pairing = (-40, 50) - result = get_bounds_of_distribution(cube) + result = ( + get_bounds_of_distribution(cube.name(), cube_units)) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_unit_conversion(self): @@ -284,8 +438,10 @@ def test_check_unit_conversion(self): """ cube = self.current_temperature_forecast_cube cube.coord("probability_above_threshold").convert_units("fahrenheit") + cube_units = cube.coord("probability_above_threshold").units bounds_pairing = (-40, 122) # In fahrenheit - result = get_bounds_of_distribution(cube) + result = ( + get_bounds_of_distribution(cube.name(), cube_units)) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_exception_is_raised(self): @@ -295,9 +451,10 @@ def test_check_exception_is_raised(self): cube = self.current_temperature_forecast_cube cube.standard_name = None cube.long_name = "Nonsense" - msg = "The forecast_probabilities name" + cube_units = cube.coord("probability_above_threshold").units + msg = "The forecast_cube name" with self.assertRaisesRegexp(KeyError, msg): - get_bounds_of_distribution(cube) + get_bounds_of_distribution(cube.name(), cube_units) if __name__ == '__main__': diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 0e0a9247f9..d4483ceaa8 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -46,85 +46,11 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ import bounds_for_ecdf from improver.tests.helper_functions_ensemble_calibration import( - _add_forecast_reference_time_and_forecast_period) - - -def set_up_cube(data, phenomenon_standard_name, phenomenon_units, - forecast_thresholds=[8, 10, 12], - y_dimension_length=3, x_dimension_length=3): - """Create a cube containing multiple realizations.""" - cube = Cube(data, standard_name=phenomenon_standard_name, - units=phenomenon_units) - cube.add_dim_coord( - DimCoord(forecast_thresholds, - long_name='probability_above_threshold', units='degreesC'), 0) - time_origin = "hours since 1970-01-01 00:00:00" - calendar = "gregorian" - tunit = 
Unit(time_origin, calendar) - cube.add_dim_coord(DimCoord([402192.5], - "time", units=tunit), 1) - cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, y_dimension_length), - 'latitude', units='degrees'), 2) - cube.add_dim_coord(DimCoord(np.linspace(120, 180, x_dimension_length), - 'longitude', units='degrees'), 3) - return cube - - -def set_up_temperature_cube(): - """Create a cube with metadata and values suitable for air temperature.""" - data = np.array([[[[1.0, 0.9, 1.0], - [0.8, 0.9, 0.5], - [0.5, 0.2, 0.0]]], - [[[1.0, 0.5, 1.0], - [0.5, 0.5, 0.3], - [0.2, 0.0, 0.0]]], - [[[1.0, 0.2, 0.5], - [0.2, 0.0, 0.1], - [0.0, 0.0, 0.0]]]]) - return set_up_cube(data, "air_temperature", "1") - - -def set_up_spot_cube(data, phenomenon_standard_name, phenomenon_units, - forecast_thresholds=[8, 10, 12], - y_dimension_length=9, x_dimension_length=9): - """ - Create a cube containing multiple realizations, where one of the - dimensions is an index used for spot forecasts. - """ - cube = Cube(data, standard_name=phenomenon_standard_name, - units=phenomenon_units) - cube.add_dim_coord( - DimCoord(forecast_thresholds, - long_name='probability_above_threshold', units='degreesC'), 0) - time_origin = "hours since 1970-01-01 00:00:00" - calendar = "gregorian" - tunit = Unit(time_origin, calendar) - cube.add_dim_coord(DimCoord([402192.5], - "time", units=tunit), 1) - cube.add_dim_coord(DimCoord(np.arange(9), long_name='locnum', - units="1"), 2) - cube.add_aux_coord(AuxCoord(np.linspace(-45.0, 45.0, y_dimension_length), - 'latitude', units='degrees'), data_dims=2) - cube.add_aux_coord(AuxCoord(np.linspace(120, 180, x_dimension_length), - 'longitude', units='degrees'), data_dims=2) - return cube - - -def set_up_spot_temperature_cube(): - """ - Create a cube with metadata and values suitable for air temperature - for spot forecasts. 
- """ - data = np.array([[[1.0, 0.9, 1.0, - 0.8, 0.9, 0.5, - 0.5, 0.2, 0.0]], - [[1.0, 0.5, 1.0, - 0.5, 0.5, 0.3, - 0.2, 0.0, 0.0]], - [[1.0, 0.2, 0.5, - 0.2, 0.0, 0.1, - 0.0, 0.0, 0.0]]]) - return set_up_spot_cube(data, "air_temperature", "1") + _add_forecast_reference_time_and_forecast_period, + set_up_probability_above_threshold_cube, + set_up_probability_above_threshold_temperature_cube, + set_up_probability_above_threshold_spot_cube, + set_up_probability_above_threshold_spot_temperature_cube) class Test__add_bounds_to_thresholds_and_probabilities(IrisTest): @@ -137,7 +63,7 @@ class Test__add_bounds_to_thresholds_and_probabilities(IrisTest): def setUp(self): self.current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) + set_up_probability_above_threshold_temperature_cube())) def test_basic(self): """Test that the plugin returns two numpy arrays.""" @@ -212,10 +138,10 @@ def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) + set_up_probability_above_threshold_temperature_cube())) self.current_temperature_spot_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_spot_temperature_cube())) + set_up_probability_above_threshold_spot_temperature_cube())) def test_basic(self): """Test that the plugin returns an Iris.cube.Cube.""" @@ -242,7 +168,7 @@ def test_simple_check_data(self): self.current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_cube( + set_up_probability_above_threshold_cube( data, "air_temperature", "1", forecast_thresholds=[8, 10, 12], y_dimension_length=1, x_dimension_length=1))) @@ -265,7 +191,7 @@ def test_probabilities_not_monotonically_increasing(self): self.current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_cube( + set_up_probability_above_threshold_cube( data, "air_temperature", "1", forecast_thresholds=[8, 10, 12], y_dimension_length=1, x_dimension_length=1))) @@ -363,8 +289,9 @@ def test_lots_of_probability_thresholds(self): temperature_values = np.arange(0, 30) cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_cube(input_probs, "air_temperature", "1", - forecast_thresholds=temperature_values))) + set_up_probability_above_threshold_cube( + input_probs, "air_temperature", "1", + forecast_thresholds=temperature_values))) percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() @@ -477,7 +404,7 @@ def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) + set_up_probability_above_threshold_temperature_cube())) def test_check_data_specifying_percentiles(self): """ diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index c20aa84186..e49dd712a6 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -31,6 +31,23 @@ """ Unit tests for the `ensemble_copula_coupling.ResamplePercentiles` class. 
""" +import numpy as np +import unittest + +from cf_units import Unit +from iris.coords import AuxCoord, DimCoord +from iris.cube import Cube +from iris.tests import IrisTest + +from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( + ResamplePercentiles as Plugin) +from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ + import bounds_for_ecdf +from improver.tests.helper_functions_ensemble_calibration import( + _add_forecast_reference_time_and_forecast_period, + set_up_cube, set_up_temperature_cube, set_up_spot_cube, + set_up_spot_temperature_cube) + class Test__add_bounds_to_percentiles_and_forecast_values(IrisTest): @@ -67,8 +84,7 @@ def test_basic(self): def test_bounds_of_percentiles(self): """ Test that the plugin returns the expected results for the - percentiles, where they've been padded with the values from - the bounds_pairing. + percentiles, where the percentile values have been padded with 0 and 1. """ cube = self.percentile_cube percentiles = cube.coord("percentile").points @@ -77,38 +93,55 @@ def test_bounds_of_percentiles(self): plugin = Plugin() result = plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( percentiles, forecast_at_percentiles, bounds_pairing) - self.assertArrayAlmostEqual(result[0][0], bounds_pairing[0]) - self.assertArrayAlmostEqual(result[0][-1], bounds_pairing[1]) + self.assertArrayAlmostEqual(result[0][0], 0) + self.assertArrayAlmostEqual(result[0][-1], 1) def test_probability_data(self): """ Test that the plugin returns the expected results for the - probabilities, where they've been padded with zeros and ones to - represent the extreme ends of the Cumulative Distribution Function. + forecast values, where they've been padded with the values from the + bounds_pairing. """ cube = self.percentile_cube percentiles = cube.coord("percentile").points forecast_at_percentiles = cube.data.reshape(3, 9) - zero_array = np.zeros(forecast_at_percentiles[:, 0].shape) - one_array = np.ones(forecast_at_percentiles[:, 0].shape) bounds_pairing = (-40, 50) + lower_array = np.full( + forecast_at_percentiles[:, 0].shape, bounds_pairing[0]) + upper_array = np.full( + forecast_at_percentiles[:, 0].shape, bounds_pairing[1]) plugin = Plugin() result = plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( percentiles, forecast_at_percentiles, bounds_pairing) - self.assertArrayAlmostEqual(result[1][:, 0], zero_array) - self.assertArrayAlmostEqual(result[1][:, -1], one_array) + self.assertArrayAlmostEqual(result[1][:, 0], lower_array) + self.assertArrayAlmostEqual(result[1][:, -1], upper_array) def test_endpoints_of_distribution_exceeded(self): """ Test that the plugin raises a ValueError when the constant - end points of the distribution are exceeded by a threshold value - used in the forecast. + end points of the distribution are exceeded by a forecast value. + The end points must be outside the minimum and maximum within the + forecast values. 
""" - forecast_at_percentiles = np.array([[0.05, 0.7, 0.95]]) - percentiles = np.array([8, 10, 60]) + forecast_at_percentiles = np.array([[8, 10, 60]]) + percentiles = np.array([0.05, 0.7, 0.95]) bounds_pairing = (-40, 50) plugin = Plugin() - msg = "The end points added to the threshold values for" + msg = "The end points added to the forecast at percentiles" + with self.assertRaisesRegexp(ValueError, msg): + plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( + percentiles, forecast_at_percentiles, bounds_pairing) + + def test_percentiles_not_ascending(self): + """ + Test that the plugin raises a ValueError, if the percentiles are + not in ascending order. + """ + forecast_at_percentiles = np.array([[8, 10, 12]]) + percentiles = np.array([100, 0, -100]) + bounds_pairing = (-40, 50) + plugin = Plugin() + msg = "The percentiles must be in ascending order" with self.assertRaisesRegexp(ValueError, msg): plugin._add_bounds_to_percentiles_and_forecast_at_percentiles( percentiles, forecast_at_percentiles, bounds_pairing) @@ -126,164 +159,147 @@ def setUp(self): data[1] += 1 data[2] += 3 cube = set_up_cube(data, "air_temperature", "degreesC") - self.realization_cube = ( - _add_forecast_reference_time_and_forecast_period(cube.copy())) cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) self.percentile_cube = ( _add_forecast_reference_time_and_forecast_period(cube)) + spot_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_spot_temperature_cube())) + spot_cube.convert_units("degreesC") + spot_cube.coord("realization").rename("percentile") + spot_cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) + spot_cube.data = np.tile(np.linspace(5, 10, 3), 9).reshape(3, 1, 9) + self.spot_percentile_cube = spot_cube def test_basic(self): """Test that the plugin returns an Iris.cube.Cube.""" - cube = self.current_temperature_forecast_cube + cube = self.percentile_cube percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._probabilities_to_percentiles( + result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) self.assertIsInstance(result, Cube) def test_simple_check_data(self): """ Test that the plugin returns an Iris.cube.Cube with the expected - data values for the percentiles. - - The input cube contains probabilities greater than a given threshold. + forecast values at each percentile. 
""" - expected = np.array([8.15384615, 9.38461538, 11.6]) + expected = np.array([8, 10, 12]) expected = expected[:, np.newaxis, np.newaxis, np.newaxis] - data = np.array([0.95, 0.3, 0.05]) + data = np.array([8, 10, 12]) data = data[:, np.newaxis, np.newaxis, np.newaxis] - self.current_temperature_forecast_cube = ( + current_temperature_forecast_cube = ( _add_forecast_reference_time_and_forecast_period( set_up_cube( data, "air_temperature", "1", - forecast_thresholds=[8, 10, 12], y_dimension_length=1, - x_dimension_length=1))) - cube = self.current_temperature_forecast_cube + y_dimension_length=1, x_dimension_length=1))) + cube = current_temperature_forecast_cube + cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._probabilities_to_percentiles( + result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, expected) - def test_probabilities_not_monotonically_increasing(self): - """ - Test that the plugin raises a ValueError when the probabilities - of the Cumulative Distribution Function are not monotonically - increasing. - """ - data = np.array([0.05, 0.7, 0.95]) - data = data[:, np.newaxis, np.newaxis, np.newaxis] - - self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_cube( - data, "air_temperature", "1", - forecast_thresholds=[8, 10, 12], y_dimension_length=1, - x_dimension_length=1))) - cube = self.current_temperature_forecast_cube - percentiles = [0.1, 0.5, 0.9] - bounds_pairing = (-40, 50) - plugin = Plugin() - msg = "The probability values used to construct the" - with self.assertRaisesRegexp(ValueError, msg): - plugin._probabilities_to_percentiles( - cube, percentiles, bounds_pairing) - - def test_result_cube_has_no_probability_above_threshold_coordinate(self): - """ - Test that the plugin returns a cube with coordinates that - do not include the probability_above_threshold coordinate. - """ - cube = self.current_temperature_forecast_cube - percentiles = [0.1, 0.5, 0.9] - bounds_pairing = (-40, 50) - plugin = Plugin() - result = plugin._probabilities_to_percentiles( - cube, percentiles, bounds_pairing) - for coord in result.coords(): - self.assertNotEqual(coord.name(), "probability_above_threshold") - def test_check_data(self): """ Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles. """ - data = np.array([[[[15.8, 31., 46.2], - [8., 10., 31.], - [10.4, 12., 42.4]]], - [[[-16., 10, 31.], - [8., 10., 11.6], - [-30.4, 8., 12.]]], - [[[-30.4, 8., 11.], - [-34., -10., 9], - [-35.2, -16., 3.2]]]]) - - cube = self.current_temperature_forecast_cube - percentiles = [0.1, 0.5, 0.9] + data = np.array([[[[4.5, 6.5, 7.5], + [5.125, 7.125, 8.125], + [5.75, 7.75, 8.75]]], + [[[6.375, 8.375, 9.375], + [7., 9., 10.], + [7.625, 9.625, 10.625]]], + [[[8.25, 10.25, 11.25], + [8.875, 10.875, 11.875], + [9.5, 11.5, 12.5]]]]) + + cube = self.percentile_cube + percentiles = [0.2, 0.6, 0.8] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._probabilities_to_percentiles( + result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) def test_check_single_threshold(self): """ Test that the plugin returns an Iris.cube.Cube with the expected - data values for the percentiles, if a single threshold is used for - constructing the percentiles. 
+ data values for the percentiles, if a single percentile is used within + the input set of percentiles. """ - data = np.array([[[[12.2, 29., 45.8], - [8., 26.66666667, 45.33333333], - [12.2, 29., 45.8]]], - [[[-16., 23.75, 44.75], - [8., 26.66666667, 45.33333333], - [-30.4, 8., 41.6]]], - [[[-30.4, 8., 41.6], - [-34., -10., 29.], - [-35.2, -16., 3.2]]]]) - - for acube in self.current_temperature_forecast_cube.slices_over( - "probability_above_threshold"): + expected = np.array([[[[4., 24.44444444, 44.88888889], + [4.625, 24.79166667, 44.95833333], + [5.25, 25.13888889, 45.02777778]]], + [[[5.875, 25.48611111, 45.09722222], + [6.5, 25.83333333, 45.16666667], + [7.125, 26.18055556, 45.23611111]]], + [[[7.75, 26.52777778, 45.30555556], + [8.375, 26.875, 45.375], + [9., 27.22222222, 45.44444444]]]]) + + data = np.array([8]) + data = data[:, np.newaxis, np.newaxis, np.newaxis] + + current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_cube( + data, "air_temperature", "1", + realizations=[0], + y_dimension_length=1, x_dimension_length=1))) + cube = current_temperature_forecast_cube + cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array([0.2]) + + for acube in self.percentile_cube.slices_over("percentile"): cube = acube break percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._probabilities_to_percentiles( + result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) - self.assertArrayAlmostEqual(result.data, data) + self.assertArrayAlmostEqual(result.data, expected) - def test_lots_of_probability_thresholds(self): + def test_lots_of_input_percentiles(self): """ Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles, if there are lots of thresholds. """ - input_probs_1d = np.linspace(1, 0, 30) - input_probs = np.tile(input_probs_1d, (3, 3, 1, 1)).T - - data = np.array([[[[2.9, 14.5, 26.1], - [2.9, 14.5, 26.1], - [2.9, 14.5, 26.1]]], - [[[2.9, 14.5, 26.1], - [2.9, 14.5, 26.1], - [2.9, 14.5, 26.1]]], - [[[2.9, 14.5, 26.1], - [2.9, 14.5, 26.1], - [2.9, 14.5, 26.1]]]]) - - temperature_values = np.arange(0, 30) + input_forecast_values_1d = np.linspace(10, 20, 30) + input_forecast_values = np.tile(input_forecast_values_1d, (3, 3, 1, 1)).T + #print "input_percentiles = ", input_percentiles + + data = np.array([[[[11., 15., 19.], + [11., 15., 19.], + [11., 15., 19.]]], + [[[11., 15., 19.], + [11., 15., 19.], + [11., 15., 19.]]], + [[[11., 15., 19.], + [11., 15., 19.], + [11., 15., 19.]]]]) + + percentiles_values = np.linspace(0, 1, 30) cube = ( _add_forecast_reference_time_and_forecast_period( - set_up_cube(input_probs, "air_temperature", "1", - forecast_thresholds=temperature_values))) + set_up_cube(input_forecast_values, "air_temperature", "1", + realizations=np.arange(30)))) + cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array(percentiles_values) percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._probabilities_to_percentiles( + result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) @@ -293,68 +309,41 @@ def test_lots_of_percentiles(self): data values for the percentiles, if lots of percentile values are requested. 
""" - data = np.array([[[[13.9, 15.8, 17.7], - [19.6, 21.5, 23.4], - [25.3, 27.2, 29.1]]], - [[[31., 32.9, 34.8], - [36.7, 38.6, 40.5], - [42.4, 44.3, 46.2]]], - [[[48.1, -16., 8.], - [8.25, 8.5, 8.75], - [9., 9.25, 9.5]]], - [[[9.75, 10., 10.33333333], - [10.66666667, 11., 11.33333333], - [11.66666667, 12., 21.5]]], - [[[31., 40.5, 10.2], - [10.4, 10.6, 10.8], - [11., 11.2, 11.4]]], - [[[11.6, 11.8, 12.], - [15.8, 19.6, 23.4], - [27.2, 31., 34.8]]], - [[[38.6, 42.4, 46.2], - [-28., -16., -4.], - [8., 8.33333333, 8.66666667]]], - [[[9., 9.33333333, 9.66666667], - [10., 10.33333333, 10.66666667], - [11., 11.33333333, 11.66666667]]], - [[[12., 21.5, 31.], - [40.5, -16., 8.], - [8.25, 8.5, 8.75]]], - [[[9., 9.25, 9.5], - [9.75, 10., 10.2], - [10.4, 10.6, 10.8]]], - [[[11., 11.2, 11.4], - [11.6, 11.8, -35.2], - [-30.4, -25.6, -20.8]]], - [[[-16., -11.2, -6.4], - [-1.6, 3.2, 8.], - [8.5, 9., 9.5]]], - [[[10., 10.5, 11.], - [11.5, 12., 31.], - [-35.2, -30.4, -25.6]]], - [[[-20.8, -16., -11.2], - [-6.4, -1.6, 3.2], - [8., 8.33333333, 8.66666667]]], - [[[9., 9.33333333, 9.66666667], - [10., 10.5, 11.], - [11.5, -37., -34.]]], - [[[-31., -28., -25.], - [-22., -19., -16.], - [-13., -10., -7.]]], - [[[-4., -1., 2.], - [5., 8., 8.5], - [9., 9.5, -37.6]]], - [[[-35.2, -32.8, -30.4], - [-28., -25.6, -23.2], - [-20.8, -18.4, -16.]]], - [[[-13.6, -11.2, -8.8], - [-6.4, -4., -1.6], - [0.8, 3.2, 5.6]]]]) - cube = self.current_temperature_forecast_cube - percentiles = np.arange(0.05, 1.0, 0.05) + data = np.array([[[[-18., 4.25, 4.75], + [5.25, 5.75, 6.25], + [6.75, 7.25, 7.75]]], + [[[29., -17.6875, 4.875], + [5.375, 5.875, 6.375], + [6.875, 7.375, 7.875]]], + [[[8.375, 29.3125, -17.375], + [5.5, 6., 6.5], + [7., 7.5, 8.]]], + [[[8.5, 9., 29.625], + [-17.0625, 6.125, 6.625], + [7.125, 7.625, 8.125]]], + [[[8.625, 9.125, 9.625], + [29.9375, -16.75, 6.75], + [7.25, 7.75, 8.25]]], + [[[8.75, 9.25, 9.75], + [10.25, 30.25, -16.4375], + [7.375, 7.875, 8.375]]], + [[[8.875, 9.375, 9.875], + [10.375, 10.875, 30.5625], + [-16.125, 8., 8.5]]], + [[[9., 9.5, 10.], + [10.5, 11., 11.5], + [30.875, -15.8125, 8.625]]], + [[[9.125, 9.625, 10.125], + [10.625, 11.125, 11.625], + [12.125, 31.1875, -15.5]]], + [[[9.25, 9.75, 10.25], + [10.75, 11.25, 11.75], + [12.25, 12.75, 31.5]]]]) + cube = self.percentile_cube + percentiles = np.arange(0.05, 1.0, 0.1) bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._probabilities_to_percentiles( + result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) @@ -363,20 +352,14 @@ def test_check_data_spot_forecasts(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles for spot forecasts. 
""" - data = np.array([[[15.8, 31., 46.2, - 8., 10., 31., - 10.4, 12., 42.4]], - [[-16., 10, 31., - 8., 10., 11.6, - -30.4, 8., 12.]], - [[-30.4, 8., 11., - -34., -10., 9, - -35.2, -16., 3.2]]]) - cube = self.current_temperature_spot_forecast_cube + data = np.array([[[5., 5., 5., 7.5, 7.5, 7.5, 10., 10., 10.]], + [[5., 5., 5., 7.5, 7.5, 7.5, 10., 10., 10.]], + [[5., 5., 5., 7.5, 7.5, 7.5, 10., 10., 10.]]]) + cube = self.spot_percentile_cube percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._probabilities_to_percentiles( + result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) @@ -391,9 +374,8 @@ def setUp(self): data[1] += 1 data[2] += 3 cube = set_up_cube(data, "air_temperature", "degreesC") - self.realization_cube = ( - _add_forecast_reference_time_and_forecast_period(cube.copy())) cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) self.percentile_cube = ( _add_forecast_reference_time_and_forecast_period(cube)) @@ -402,21 +384,20 @@ def test_check_data_specifying_percentiles(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for a specific number of percentiles. """ - data = np.array([[[[21.5, 31., 40.5], - [8.75, 10., 11.66666667], - [11., 12., 31.]]], - [[[8.33333333, 10., 11.66666667], - [8.75, 10., 11.], - [-16., 8., 10.5]]], - [[[-16., 8., 9.66666667], - [-25., -10., 5.], - [-28., -16., -4.]]]]) + data = np.array([[[[4.75, 6., 7.25], + [5.375, 6.625, 7.875], + [6., 7.25, 8.5]]], + [[[6.625, 7.875, 9.125], + [7.25, 8.5, 9.75], + [7.875, 9.125, 10.375]]], + [[[8.5, 9.75, 11.], + [9.125, 10.375, 11.625], + [9.75, 11., 12.25]]]]) cube = self.percentile_cube percentiles = [0.25, 0.5, 0.75] plugin = Plugin() - result = plugin.process( - cube, no_of_percentiles=len(percentiles)) + result = plugin.process(cube, no_of_percentiles=len(percentiles)) self.assertArrayAlmostEqual(result.data, data) def test_check_data_not_specifying_percentiles(self): @@ -424,18 +405,21 @@ def test_check_data_not_specifying_percentiles(self): Test that the plugin returns an Iris.cube.Cube with the expected data values without specifying the number of percentiles. 
""" - data = np.array([[[[21.5, 31., 40.5], - [8.75, 10., 11.66666667], - [11., 12., 31.]]], - [[[8.33333333, 10., 11.66666667], - [8.75, 10., 11.], - [-16., 8., 10.5]]], - [[[-16., 8., 9.66666667], - [-25., -10., 5.], - [-28., -16., -4.]]]]) + data = np.array([[[[4.75, 6., 7.25], + [5.375, 6.625, 7.875], + [6., 7.25, 8.5]]], + [[[6.625, 7.875, 9.125], + [7.25, 8.5, 9.75], + [7.875, 9.125, 10.375]]], + [[[8.5, 9.75, 11.], + [9.125, 10.375, 11.625], + [9.75, 11., 12.25]]]]) cube = self.percentile_cube plugin = Plugin() result = plugin.process(cube) self.assertArrayAlmostEqual(result.data, data) + +if __name__ == '__main__': + unittest.main() \ No newline at end of file From 7366fa1f08007a8c2fbd8d00efc5d73f2817a869 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 23 May 2017 08:53:02 +0100 Subject: [PATCH 0114/1367] Pep8 fixes --- .../ensemble_copula_coupling.py | 14 +++++++------- .../ensemble_copula_coupling_utilities.py | 14 +++++++------- .../tests/helper_functions_ensemble_calibration.py | 5 +++-- ...ula_coupling_EnsembleCopulaCouplingUtilities.py | 2 +- ...ensemble_copula_coupling_ResamplePercentiles.py | 6 +++--- 5 files changed, 21 insertions(+), 20 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 31400e0246..74e9792e2a 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -319,12 +319,12 @@ def _add_bounds_to_thresholds_and_probabilities( probabilities_for_cdf, 0, 1) if np.any(np.diff(threshold_points) < 0): msg = ("The end points added to the threshold values for " - "constructing the Cumulative Distribution Function (CDF) " - "must result in an ascending order. " - "In this case, the threshold points {} must be " - "outside the allowable range given by the " - "bounds {}".format( - threshold_points, bounds_pairing)) + "constructing the Cumulative Distribution Function (CDF) " + "must result in an ascending order. " + "In this case, the threshold points {} must be " + "outside the allowable range given by the " + "bounds {}".format( + threshold_points, bounds_pairing)) raise ValueError(msg) return threshold_points, probabilities_for_cdf @@ -578,7 +578,7 @@ def process(self, calibrated_forecast_predictor_and_variance, """ (calibrated_forecast_predictor, calibrated_forecast_variance) = ( - calibrated_forecast_predictor_and_variance) + calibrated_forecast_predictor_and_variance) calibrated_forecast_predictor = concatenate_cubes( calibrated_forecast_predictor) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 09e4cfecba..ba7161654f 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -211,11 +211,11 @@ def get_bounds_of_distribution(cube_name, cube_units): units_of_bounds_for_ecdf[cube_name]) except KeyError as err: msg = ("The forecast_cube name: {} is not recognised" - "within bounds_for_ecdf {} or " - "units_of_bounds_for_ecdf: {}. \n" - "Error: {}".format( - cube_name, bounds_for_ecdf, - units_of_bounds_for_ecdf, err)) + "within bounds_for_ecdf {} or " + "units_of_bounds_for_ecdf: {}. 
\n" + "Error: {}".format( + cube_name, bounds_for_ecdf, + units_of_bounds_for_ecdf, err)) raise KeyError(msg) bounds_pairing_units = unit.Unit(bounds_pairing_units) bounds_pairing = bounds_pairing_units.convert( @@ -268,12 +268,12 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( """ shape_to_reshape_to = list(original_cube.shape) if original_cube.coords( - input_probabilistic_dimension_name, dim_coords=True): + input_probabilistic_dimension_name, dim_coords=True): pat_coord_position = ( original_cube.coord_dims(input_probabilistic_dimension_name)) shape_to_reshape_to.pop(pat_coord_position[0]) elif original_cube.coords( - input_probabilistic_dimension_name, dim_coords=False): + input_probabilistic_dimension_name, dim_coords=False): pass else: msg = ("A {} coordinate is not available on the {} cube.".format( diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 16ccf850be..92dab3415a 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -121,8 +121,9 @@ def set_up_probability_above_threshold_spot_temperature_cube(): [[1.0, 0.2, 0.5, 0.2, 0.0, 0.1, 0.0, 0.0, 0.0]]]) - return set_up_probability_above_threshold_spot_cube( - data, "air_temperature", "1") + return ( + set_up_probability_above_threshold_spot_cube( + data, "air_temperature", "1")) def set_up_cube(data, phenomenon_standard_name, phenomenon_units, diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 26c2ae60cc..f8dbcc7aaa 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -55,7 +55,7 @@ class Test_reshape_array_to_have_probabilistic_dimension_at_the_front( - IrisTest): + IrisTest): """Test the insert_lower_and_upper_endpoint_to_1d_array.""" diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index e49dd712a6..1ce8b9b9d0 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -276,8 +276,8 @@ def test_lots_of_input_percentiles(self): data values for the percentiles, if there are lots of thresholds. """ input_forecast_values_1d = np.linspace(10, 20, 30) - input_forecast_values = np.tile(input_forecast_values_1d, (3, 3, 1, 1)).T - #print "input_percentiles = ", input_percentiles + input_forecast_values = ( + np.tile(input_forecast_values_1d, (3, 3, 1, 1)).T) data = np.array([[[[11., 15., 19.], [11., 15., 19.], @@ -422,4 +422,4 @@ def test_check_data_not_specifying_percentiles(self): if __name__ == '__main__': - unittest.main() \ No newline at end of file + unittest.main() From f9a42a60448d0bb03c928ef3b2c1c532d1cca21f Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 23 May 2017 10:55:49 +0100 Subject: [PATCH 0115/1367] Add unit tests for RebadgePercentilesAsMembers. 
--- ...la_coupling_RebadgePercentilesAsMembers.py | 92 +++++++++++++++++++ 1 file changed, 92 insertions(+) create mode 100644 lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py diff --git a/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py new file mode 100644 index 0000000000..586ccdda1d --- /dev/null +++ b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py @@ -0,0 +1,92 @@ +# -*- coding: utf-8 -*- +# ----------------------------------------------------------------------------- +# (C) British Crown Copyright 2017 Met Office. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, this +# list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +# * Neither the name of the copyright holder nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +""" +Unit tests for the +`ensemble_copula_coupling.RebadgePercentilesAsMembers` class. + +""" +import unittest + +from iris.coords import DimCoord +from iris.cube import Cube +from iris.exceptions import CoordinateNotFoundError +from iris.tests import IrisTest +import numpy as np + +from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( + RebadgePercentilesAsMembers as Plugin) +from improver.tests.helper_functions_ensemble_calibration import ( + set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period) + + +class Test_process(IrisTest): + + """ + Test the process method of the RebadgePercentilesAsMembers plugin. 
+ """ + + def setUp(self): + cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + percentile_points = np.arange(len(cube.coord("realization").points)) + cube.coord("realization").points = percentile_points + cube.coord("realization").rename("percentile") + self.current_temperature_cube = cube + + def test_basic(self): + """""" + cube = self.current_temperature_cube + plugin = Plugin() + result = plugin.process(cube) + self.assertIsInstance(result, Cube) + self.assertIsInstance(result.coord("realization"), DimCoord) + + def test_number_of_members(self): + """""" + cube = self.current_temperature_cube + plen = len(cube.coord("percentile").points) + plugin = Plugin() + result = plugin.process(cube) + self.assertEqual(len(result.coord("realization").points), plen) + + def test_no_percentile_coord(self): + """""" + cube = self.current_temperature_cube + cube.coord("percentile").rename("realization") + plugin = Plugin() + msg = "The percentile coordinate could not be found" + with self.assertRaisesRegexp(CoordinateNotFoundError, msg): + plugin.process(cube) + + +if __name__ == '__main__': + unittest.main() From 461241ec9b905e2c2a7120c70fd85d807ad7b31a Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 23 May 2017 13:58:40 +0100 Subject: [PATCH 0116/1367] Latest edits following review comments. A few unit tests fall, but just need small modifications. --- .../ensemble_copula_coupling.py | 17 +- .../ensemble_copula_coupling_utilities.py | 21 +- .../helper_functions_ensemble_calibration.py | 19 +- ...oupling_EnsembleCopulaCouplingUtilities.py | 807 +++++++++--------- ...ng_GeneratePercentilesFromProbabilities.py | 244 +++--- ...la_coupling_RebadgePercentilesAsMembers.py | 2 + ...ble_copula_coupling_ResamplePercentiles.py | 200 +++-- 7 files changed, 719 insertions(+), 591 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 74e9792e2a..3d7ed2c8b4 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -69,7 +69,7 @@ def process(self, cube): """ Rebadge percentiles as ensemble members. The ensemble member numbering will depend upon the number of percentiles in the input cube i.e. - 0, 1, 2, 3, ..., n, if there are n percentiles. + 0, 1, 2, 3, ..., n-1, if there are n percentiles. Parameters ---------- @@ -92,11 +92,11 @@ def process(self, cube): class ResamplePercentiles(object): """ Class for resampling percentiles from an existing set of percentiles. - In combination with the Ensemble Reordering plugin, this is Ensemble - Copula Coupling. + In combination with the Ensemble Reordering plugin, this is a variant of + Ensemble Copula Coupling. - This class includes the ability to interpolate from an input set of - percentiles to a different output set of percentiles. + This class includes the ability to linearly interpolate from an + input set of percentiles to a different output set of percentiles. 
""" @@ -191,6 +191,7 @@ def _sample_percentiles( (forecast_at_reshaped_percentiles.shape[0], len(desired_percentiles)))) for index in range(forecast_at_reshaped_percentiles.shape[0]): + print "forecast_at_reshaped_percentiles[index, :] = ", forecast_at_reshaped_percentiles[index, :] forecast_at_interpolated_percentiles[index, :] = np.interp( desired_percentiles, original_percentiles, forecast_at_reshaped_percentiles[index, :]) @@ -228,7 +229,7 @@ def process(self, forecast_at_percentiles, no_of_percentiles=None, no_of_percentiles : Integer or None Number of percentiles If None, the number of percentiles within the input - forecast_percentiles cube is used as the number of percentiles. + forecast_at_percentiles cube is used as the number of percentiles. sampling : String Type of sampling of the distribution to produce a set of percentiles e.g. quantile or random. @@ -266,8 +267,8 @@ def process(self, forecast_at_percentiles, no_of_percentiles=None, class GeneratePercentilesFromProbabilities(object): """ Class for generating percentiles from probabilities. - In combination with the Ensemble Reordering plugin, this is Ensemble - Copula Coupling. + In combination with the Ensemble Reordering plugin, this is a variant + Ensemble Copula Coupling. This class includes the ability to interpolate between probability thresholds in order to generate the percentiles, see Figure 1 from diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index ba7161654f..d357f36093 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -60,6 +60,11 @@ def concatenate_2d_array_with_2d_array_endpoints( high_endpoint : Number Number of used to create a 2d array of a constant value as the upper endpoint. + Returns + ------- + array_2d : Numpy array + 2d array of values after padding with the low_endpoint and + high_endpoint. """ lower_array = ( np.full((array_2d.shape[0], 1), low_endpoint)) @@ -236,6 +241,10 @@ def insert_lower_and_upper_endpoint_to_1d_array( Number of use as the lower endpoint. high_endpoint : Number Number of use as the upper endpoint. + Returns + ------- + array_1d : Numpy array + 1d array of values padded with the low_endpoint and high_endpoint. """ array_1d = np.insert(array_1d, 0, low_endpoint) array_1d = np.append(array_1d, high_endpoint) @@ -256,14 +265,18 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( The array that requires reshaping. original_cube : Iris.cube.Cube Cube containing the desired shape to be reshaped to, apart from the - ensemble dimension, for example, - [ensemble_dimension, time, y, x]. + probabilistic dimension, for example, + [probabilistic_dimension, time, y, x]. input_probabilistic_dimension_name : String Name of the dimension within the original cube, which represents the probabilistic dimension. output_probabilistic_dimension_length : Integer Length of the probabilistic dimension, which will be used to create the shape to which the array_to_reshape will be reshaped to. + Returns + ------- + Numpy array + The array after reshaping. 
""" shape_to_reshape_to = list(original_cube.shape) @@ -279,7 +292,9 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( msg = ("A {} coordinate is not available on the {} cube.".format( input_probabilistic_dimension_name, original_cube)) raise CoordinateNotFoundError(msg) - + print "array_to_reshape = ", array_to_reshape + array_to_reshape = array_to_reshape.T shape_to_reshape_to = ( [output_probabilistic_dimension_length] + shape_to_reshape_to) + print "shape_to_reshape_to = ", shape_to_reshape_to return array_to_reshape.reshape(shape_to_reshape_to) diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 92dab3415a..196098bf64 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -45,9 +45,12 @@ def set_up_probability_above_threshold_cube( data, phenomenon_standard_name, phenomenon_units, - forecast_thresholds=[8, 10, 12], + forecast_thresholds=[8, 10, 12], timesteps=1, y_dimension_length=3, x_dimension_length=3): - """Create a cube containing multiple realizations.""" + """ + Create a cube containing multiple probability_above_threshold + values for the coordinate. + """ cube = Cube(data, standard_name=phenomenon_standard_name, units=phenomenon_units) cube.add_dim_coord( @@ -56,7 +59,7 @@ def set_up_probability_above_threshold_cube( time_origin = "hours since 1970-01-01 00:00:00" calendar = "gregorian" tunit = Unit(time_origin, calendar) - cube.add_dim_coord(DimCoord([402192.5], + cube.add_dim_coord(DimCoord(np.linspace(402192.5, 402292.5, timesteps), "time", units=tunit), 1) cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, y_dimension_length), 'latitude', units='degrees'), 2) @@ -66,7 +69,9 @@ def set_up_probability_above_threshold_cube( def set_up_probability_above_threshold_temperature_cube(): - """Create a cube with metadata and values suitable for air temperature.""" + """ + Create a cube with metadata and values suitable for air temperature. 
+ """ data = np.array([[[[1.0, 0.9, 1.0], [0.8, 0.9, 0.5], [0.5, 0.2, 0.0]]], @@ -127,8 +132,8 @@ def set_up_probability_above_threshold_spot_temperature_cube(): def set_up_cube(data, phenomenon_standard_name, phenomenon_units, - realizations=[0, 1, 2], y_dimension_length=3, - x_dimension_length=3): + realizations=[0, 1, 2], timesteps=1, + y_dimension_length=3, x_dimension_length=3): """Create a cube containing multiple realizations.""" cube = Cube(data, standard_name=phenomenon_standard_name, units=phenomenon_units) @@ -137,7 +142,7 @@ def set_up_cube(data, phenomenon_standard_name, phenomenon_units, time_origin = "hours since 1970-01-01 00:00:00" calendar = "gregorian" tunit = Unit(time_origin, calendar) - cube.add_dim_coord(DimCoord([402192.5], + cube.add_dim_coord(DimCoord(np.linspace(402192.5, 402292.5, timesteps), "time", units=tunit), 1) cube.add_dim_coord(DimCoord(np.linspace(-45.0, 45.0, y_dimension_length), 'latitude', units='degrees'), 2) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index f8dbcc7aaa..7a3c7dc2f4 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -49,11 +49,350 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ import bounds_for_ecdf from improver.tests.helper_functions_ensemble_calibration import ( + set_up_cube, set_up_temperature_cube, set_up_spot_temperature_cube, _add_forecast_reference_time_and_forecast_period, set_up_probability_above_threshold_temperature_cube) +#class Test_concatenate_2d_array_with_2d_array_endpoints(IrisTest): + + #"""Test the concatenate_2d_array_with_2d_array_endpoints.""" + + #def test_basic(self): + #""" + #Basic test that the result is a numpy array with the expected contents. + #""" + #expected = np.array([[0, 0.2, 0.5, 0.8, 1]]) + #percentiles = np.array([[0.2, 0.5, 0.8]]) + #result = concatenate_2d_array_with_2d_array_endpoints( + #percentiles, 0, 1) + #self.assertIsInstance(result, np.ndarray) + #self.assertArrayAlmostEqual(result, expected) + + #def test_another_example(self): + #""" + #Another basic test that the result is a numpy array with the + #expected contents. + #""" + #expected = np.array( + # [[-100, -40, 200, 1000, 10000], [-100, -40, 200, 1000, 10000]]) + #percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) + #result = concatenate_2d_array_with_2d_array_endpoints( + #percentiles, -100, 10000) + #self.assertIsInstance(result, np.ndarray) + #self.assertArrayAlmostEqual(result, expected) + + #def test_1d_input(self): + #""" + #Test that a 1d input array results in the expected error. + #""" + #expected = np.array([-100, -40, 200, 1000, 10000]) + #percentiles = np.array([-40, 200, 1000]) + #msg = "all the input arrays must have same number of dimensions" + #with self.assertRaisesRegexp(ValueError, msg): + #concatenate_2d_array_with_2d_array_endpoints( + #percentiles, -100, 10000) + + #def test_3d_input(self): + #""" + #Test that a 3d input array results in the expected error. 
+ #""" + #expected = np.array([[[-100, -40, 200, 1000, 10000]]]) + #percentiles = np.array([[[-40, 200, 1000]]]) + #msg = "all the input arrays must have same number of dimensions" + #with self.assertRaisesRegexp(ValueError, msg): + #concatenate_2d_array_with_2d_array_endpoints( + #percentiles, -100, 10000) + + +#class Test_create_cube_with_percentiles(IrisTest): + + #"""Test the _create_cube_with_percentiles plugin.""" + + #def setUp(self): + #"""Set up temperature cube.""" + #current_temperature_forecast_cube = ( + #_add_forecast_reference_time_and_forecast_period( + #set_up_temperature_cube())) + + #self.cube_data = current_temperature_forecast_cube.data + + #current_temperature_spot_forecast_cube = ( + #_add_forecast_reference_time_and_forecast_period( + #set_up_spot_temperature_cube())) + #self.cube_spot_data = ( + #current_temperature_spot_forecast_cube.data) + + #for cube in current_temperature_forecast_cube.slices_over( + #"realization"): + #cube.remove_coord("realization") + #break + #self.current_temperature_forecast_cube = cube + + #for cube in current_temperature_spot_forecast_cube.slices_over( + #"realization"): + #cube.remove_coord("realization") + #break + #self.current_temperature_spot_forecast_cube = cube + + #def test_basic(self): + #"""Test that the plugin returns an Iris.cube.Cube.""" + #cube = self.current_temperature_forecast_cube + #cube_data = self.cube_data + 2 + #percentiles = [0.1, 0.5, 0.9] + #result = create_cube_with_percentiles( + #percentiles, cube, cube_data) + #self.assertIsInstance(result, Cube) + + #def test_many_percentiles(self): + #""" + #Test that the plugin returns an Iris.cube.Cube with many + #percentiles. + #""" + #cube = self.current_temperature_forecast_cube + #percentiles = np.linspace(0, 1, 100) + #cube_data = np.zeros( + #[len(percentiles), len(cube.coord("time").points), + #len(cube.coord("latitude").points), + #len(cube.coord("longitude").points)]) + #result = create_cube_with_percentiles( + #percentiles, cube, cube_data) + #self.assertEqual(cube_data.shape, result.data.shape) + + #def test_incompatible_percentiles(self): + #""" + #Test that the plugin fails if the percentile values requested + #are not numbers. + #""" + #cube = self.current_temperature_forecast_cube + #percentiles = ["cat", "dog", "elephant"] + #cube_data = np.zeros( + #[len(percentiles), len(cube.coord("time").points), + #len(cube.coord("latitude").points), + #len(cube.coord("longitude").points)]) + #msg = "could not convert string to float" + #with self.assertRaisesRegexp(ValueError, msg): + #create_cube_with_percentiles(percentiles, cube, cube_data) + + #def test_percentile_points(self): + #""" + #Test that the plugin returns an Iris.cube.Cube + #with a percentile coordinate with the desired points. + #""" + #cube = self.current_temperature_forecast_cube + #cube_data = self.cube_data + 2 + #percentiles = [0.1, 0.5, 0.9] + #result = create_cube_with_percentiles(percentiles, cube, cube_data) + #self.assertIsInstance(result.coord("percentile"), DimCoord) + #self.assertArrayAlmostEqual( + #result.coord("percentile").points, percentiles) + + #def test_spot_forecasts_percentile_points(self): + #""" + #Test that the plugin returns a Cube with a percentile dimension + #coordinate and that the percentile dimension has the expected points + #for an input spot forecast. 
+ #""" + #cube = self.current_temperature_spot_forecast_cube + #cube_data = self.cube_spot_data + 2 + #percentiles = [0.1, 0.5, 0.9] + #result = create_cube_with_percentiles( + #percentiles, cube, cube_data) + #self.assertIsInstance(result, Cube) + #self.assertIsInstance(result.coord("percentile"), DimCoord) + #self.assertArrayAlmostEqual( + #result.coord("percentile").points, percentiles) + + #def test_percentile_length_too_short(self): + #""" + #Test that the plugin raises the default ValueError, if the number + #of percentiles is fewer than the length of the zeroth dimension within + #the cube. + #""" + #cube = self.current_temperature_forecast_cube + #cube_data = self.cube_data + 2 + #percentiles = [0.1, 0.5] + #msg = "Unequal lengths" + #with self.assertRaisesRegexp(ValueError, msg): + #create_cube_with_percentiles( + #percentiles, cube, cube_data) + + #def test_percentile_length_too_long(self): + #""" + #Test that the plugin raises the default ValueError, if the number + #of percentiles exceeds the length of the zeroth dimension within + #the cube. + #""" + #cube = self.current_temperature_forecast_cube + #cube = cube[0, :, :, :] + #cube_data = self.cube_data + 2 + #percentiles = [0.1, 0.5, 0.9] + #msg = "Unequal lengths" + #with self.assertRaisesRegexp(ValueError, msg): + #create_cube_with_percentiles( + #percentiles, cube, cube_data) + + #def test_metadata_copy(self): + #""" + #Test that the metadata dictionaries within the input cube, are + #also present on the output cube. + #""" + #cube = self.current_temperature_forecast_cube + #cube.attributes = {"source": "ukv"} + #cube_data = self.cube_data + 2 + #percentiles = [0.1, 0.5, 0.9] + #result = create_cube_with_percentiles( + #percentiles, cube, cube_data) + #self.assertDictEqual( + #cube.metadata._asdict(), result.metadata._asdict()) + + #def test_coordinate_copy(self): + #""" + #Test that the coordinates within the input cube, are + #also present on the output cube. + #""" + #cube = self.current_temperature_forecast_cube + #cube.attributes = {"source": "ukv"} + #cube_data = self.cube_data + 2 + #percentiles = [0.1, 0.5, 0.9] + #result = create_cube_with_percentiles( + #percentiles, cube, cube_data) + #for coord in cube.coords(): + #if coord not in result.coords(): + #msg = ( + #"Coordinate: {} not found in cube {}".format( + #coord, result)) + #raise CoordinateNotFoundError(msg) + + +#class Test_create_percentiles(IrisTest): + + #"""Test the create_percentiles plugin.""" + + #def test_basic(self): + #""" + #Test that the plugin returns a list with the expected number of + #percentiles. + #""" + #no_of_percentiles = 3 + #result = create_percentiles(no_of_percentiles) + #self.assertIsInstance(result, list) + #self.assertEqual(len(result), no_of_percentiles) + + #def test_data(self): + #""" + #Test that the plugin returns a list with the expected data values + #for the percentiles. + #""" + #data = np.array([0.25, 0.5, 0.75]) + #no_of_percentiles = 3 + #result = create_percentiles(no_of_percentiles) + #self.assertArrayAlmostEqual(result, data) + + #def test_random(self): + #""" + #Test that the plugin returns a list with the expected number of + #percentiles, if the random sampling option is selected. 
+ #""" + #no_of_percentiles = 3 + #result = create_percentiles(no_of_percentiles, sampling="random") + #self.assertIsInstance(result, list) + #self.assertEqual(len(result), no_of_percentiles) + + #def test_unknown_sampling_option(self): + #""" + #Test that the plugin returns the expected error message, + #if an unknown sampling option is selected. + #""" + #no_of_percentiles = 3 + #msg = "The unknown sampling option is not yet implemented" + #with self.assertRaisesRegexp(ValueError, msg): + #create_percentiles(no_of_percentiles, sampling="unknown") + + +#class Test_get_bounds_of_distribution(IrisTest): + + #"""Test the get_bounds_of_distribution plugin.""" + + #def setUp(self): + #self.current_temperature_forecast_cube = ( + #_add_forecast_reference_time_and_forecast_period( + #set_up_probability_above_threshold_temperature_cube())) + + #def test_basic(self): + #"""Test that the result is a numpy array.""" + #cube = self.current_temperature_forecast_cube + #cube_units = cube.coord("probability_above_threshold").units + #result = get_bounds_of_distribution(cube.name(), cube_units) + #self.assertIsInstance(result, np.ndarray) + + #def test_check_data(self): + #""" + #Test that the expected results are returned for the bounds_pairing. + #""" + #cube = self.current_temperature_forecast_cube + #cube_units = cube.coord("probability_above_threshold").units + #bounds_pairing = (-40, 50) + #result = ( + #get_bounds_of_distribution(cube.name(), cube_units)) + #self.assertArrayAlmostEqual(result, bounds_pairing) + + #def test_check_unit_conversion(self): + #""" + #Test that the expected results are returned for the bounds_pairing, + #if the units of the bounds_pairings need to be converted to match + #the units of the forecast. + #""" + #cube = self.current_temperature_forecast_cube + #cube.coord("probability_above_threshold").convert_units("fahrenheit") + #cube_units = cube.coord("probability_above_threshold").units + #bounds_pairing = (-40, 122) # In fahrenheit + #result = ( + #get_bounds_of_distribution(cube.name(), cube_units)) + #self.assertArrayAlmostEqual(result, bounds_pairing) + + #def test_check_exception_is_raised(self): + #""" + #Test that the expected results are returned for the bounds_pairing. + #""" + #cube = self.current_temperature_forecast_cube + #cube.standard_name = None + #cube.long_name = "Nonsense" + #cube_units = cube.coord("probability_above_threshold").units + #msg = "The forecast_cube name" + #with self.assertRaisesRegexp(KeyError, msg): + #get_bounds_of_distribution(cube.name(), cube_units) + + +#class Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest): + + #"""Test the insert_lower_and_upper_endpoint_to_1d_array.""" + + #def test_basic(self): + #""" + #Basic test that the result is a numpy array with the expected contents. + #""" + #expected = [0, 0.2, 0.5, 0.8, 1] + #percentiles = [0.2, 0.5, 0.8] + #result = insert_lower_and_upper_endpoint_to_1d_array( + #percentiles, 0, 1) + #self.assertIsInstance(result, np.ndarray) + #self.assertArrayAlmostEqual(result, expected) + + #def test_another_example(self): + #""" + #Another basic test that the result is a numpy array with the + #expected contents. 
+ #""" + #expected = [-100, -40, 200, 1000, 10000] + #percentiles = [-40, 200, 1000] + #result = insert_lower_and_upper_endpoint_to_1d_array( + #percentiles, -100, 10000) + #self.assertIsInstance(result, np.ndarray) + #self.assertArrayAlmostEqual(result, expected) + + class Test_reshape_array_to_have_probabilistic_dimension_at_the_front( IrisTest): @@ -69,392 +408,94 @@ def setUp(self): cube.coord("realization").rename("percentile") self.current_temperature_forecast_cube = cube - def test_basic(self): - """ - Basic test that the result is a numpy array with the expected contents. - """ - cube = self.current_temperature_forecast_cube - input_array = cube.data - plen = len(cube.coord("percentile").points) - reshaped_array = ( - reshape_array_to_have_probabilistic_dimension_at_the_front( - cube.data, cube, "percentile", plen)) - self.assertIsInstance(reshaped_array, np.ndarray) - - def test_size_of_array(self): - """ - Basic test that the result is a numpy array with the expected contents. - """ - cube = self.current_temperature_forecast_cube - input_array = cube.data + #def test_basic(self): + #""" + #Basic test that the result is a numpy array with the expected contents. + #""" + #cube = self.current_temperature_forecast_cube + #input_array = cube.data + #plen = len(cube.coord("percentile").points) + #reshaped_array = ( + #reshape_array_to_have_probabilistic_dimension_at_the_front( + #cube.data, cube, "percentile", plen)) + #self.assertIsInstance(reshaped_array, np.ndarray) + + #def test_size_of_array(self): + #""" + #Test that the result have the expected size for the + #probabilistic dimension and is generally of the expected size. + #""" + #cube = self.current_temperature_forecast_cube + #input_array = cube.data + #plen = len(cube.coord("percentile").points) + #reshaped_array = ( + #reshape_array_to_have_probabilistic_dimension_at_the_front( + #cube.data, cube, "percentile", plen)) + #self.assertEqual(reshaped_array.shape[0], plen) + #self.assertEqual(reshaped_array.shape, (3, 1, 3, 3)) + + def test_data_check(self): + """ + Test that the data has been reshaped correctly. + """ + expected = np.array([[[[ 4., 6.], + [8., 6.85714286]], + [[8.85714286, 10.85714286], + [5.42857143, 7.42857143]]], + [[[9.42857143, 8.28571429], + [10.28571429, 12.28571429]], + [[4.71428571, 6.71428571], + [8.71428571, 7.57142857]]], + [[[9.57142857, 11.57142857], + [6.14285714, 8.14285714]], + [[10.14285714, 9.], + [11., 13.]]]]) + + data = np.tile(np.linspace(5, 10, 8), 3).reshape(3, 2, 2, 2) + data[0] -= 1 + data[1] += 1 + data[2] += 3 + cube = set_up_cube(data, "air_temperature", "degreesC", + timesteps=2, x_dimension_length=2, + y_dimension_length=2) + cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) plen = len(cube.coord("percentile").points) + percentile_cube = ( + _add_forecast_reference_time_and_forecast_period( + cube, time_point=np.array([402295.0, 402296.0]))) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( - cube.data, cube, "percentile", plen)) - self.assertEqual(reshaped_array.shape[0], plen) - self.assertEqual(reshaped_array.shape, (3, 1, 3, 3)) - - def test_percentile_is_not_a_dimension_coordinate(self): - """ - Basic test that the result is a numpy array with the expected contents. 
- """ - cube = self.current_temperature_forecast_cube - for cube_slice in cube.slices_over("percentile"): - break - input_array = cube_slice.data - plen = len(cube_slice.coord("percentile").points) - reshaped_array = ( - reshape_array_to_have_probabilistic_dimension_at_the_front( - cube_slice.data, cube_slice, "percentile", plen)) - - def test_missing_coordinate(self): - """ - Basic test that the result is a numpy array with the expected contents. - """ - cube = self.current_temperature_forecast_cube - input_array = cube.data - plen = len(cube.coord("percentile").points) - msg = "coordinate is not available" - with self.assertRaisesRegexp(CoordinateNotFoundError, msg): - reshape_array_to_have_probabilistic_dimension_at_the_front( - cube.data, cube, "nonsense", plen) - - -class Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest): - - """Test the insert_lower_and_upper_endpoint_to_1d_array.""" - - def test_basic(self): - """ - Basic test that the result is a numpy array with the expected contents. - """ - expected = [0, 0.2, 0.5, 0.8, 1] - percentiles = [0.2, 0.5, 0.8] - result = insert_lower_and_upper_endpoint_to_1d_array( - percentiles, 0, 1) - self.assertIsInstance(result, np.ndarray) - self.assertArrayAlmostEqual(result, expected) - - def test_another_example(self): - """ - Another basic test that the result is a numpy array with the - expected contents. - """ - expected = [-100, -40, 200, 1000, 10000] - percentiles = [-40, 200, 1000] - result = insert_lower_and_upper_endpoint_to_1d_array( - percentiles, -100, 10000) - self.assertIsInstance(result, np.ndarray) - self.assertArrayAlmostEqual(result, expected) - - -class Test_concatenate_2d_array_with_2d_array_endpoints(IrisTest): - - """Test the concatenate_2d_array_with_2d_array_endpoints.""" - - def test_basic(self): - """ - Basic test that the result is a numpy array with the expected contents. - """ - expected = np.array([[0, 0.2, 0.5, 0.8, 1]]) - percentiles = np.array([[0.2, 0.5, 0.8]]) - result = concatenate_2d_array_with_2d_array_endpoints( - percentiles, 0, 1) - self.assertIsInstance(result, np.ndarray) - self.assertArrayAlmostEqual(result, expected) - - def test_another_example(self): - """ - Another basic test that the result is a numpy array with the - expected contents. - """ - expected = np.array([[-100, -40, 200, 1000, 10000]]) - percentiles = np.array([[-40, 200, 1000]]) - result = concatenate_2d_array_with_2d_array_endpoints( - percentiles, -100, 10000) - self.assertIsInstance(result, np.ndarray) - self.assertArrayAlmostEqual(result, expected) - - def test_1d_input(self): - """ - Test that a 1d input array results in the expected error. - """ - expected = np.array([-100, -40, 200, 1000, 10000]) - percentiles = np.array([-40, 200, 1000]) - msg = "all the input arrays must have same number of dimensions" - with self.assertRaisesRegexp(ValueError, msg): - concatenate_2d_array_with_2d_array_endpoints( - percentiles, -100, 10000) - - def test_3d_input(self): - """ - Test that a 3d input array results in the expected error. 
- """ - expected = np.array([[[-100, -40, 200, 1000, 10000]]]) - percentiles = np.array([[[-40, 200, 1000]]]) - msg = "all the input arrays must have same number of dimensions" - with self.assertRaisesRegexp(ValueError, msg): - concatenate_2d_array_with_2d_array_endpoints( - percentiles, -100, 10000) - - -class Test_create_cube_with_percentiles(IrisTest): - - """Test the _create_cube_with_percentiles plugin.""" - - def setUp(self): - """Set up temperature cube.""" - current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) - - self.cube_data = current_temperature_forecast_cube.data - - current_temperature_spot_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_spot_temperature_cube())) - self.cube_spot_data = ( - current_temperature_spot_forecast_cube.data) - - for cube in current_temperature_forecast_cube.slices_over( - "realization"): - cube.remove_coord("realization") - break - self.current_temperature_forecast_cube = cube - - for cube in current_temperature_spot_forecast_cube.slices_over( - "realization"): - cube.remove_coord("realization") - break - self.current_temperature_spot_forecast_cube = cube - - def test_basic(self): - """Test that the plugin returns an Iris.cube.Cube.""" - cube = self.current_temperature_forecast_cube - cube_data = self.cube_data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertIsInstance(result, Cube) - - def test_many_percentiles(self): - """ - Test that the plugin returns an Iris.cube.Cube with many - percentiles. - """ - cube = self.current_temperature_forecast_cube - percentiles = np.linspace(0, 1, 100) - cube_data = np.zeros( - [len(percentiles), len(cube.coord("time").points), - len(cube.coord("latitude").points), - len(cube.coord("longitude").points)]) - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertEqual(cube_data.shape, result.data.shape) - - def test_incompatible_percentiles(self): - """ - Test that the plugin fails if the percentile values requested - are not numbers. - """ - cube = self.current_temperature_forecast_cube - percentiles = ["cat", "dog", "elephant"] - cube_data = np.zeros( - [len(percentiles), len(cube.coord("time").points), - len(cube.coord("latitude").points), - len(cube.coord("longitude").points)]) - msg = "could not convert string to float" - with self.assertRaisesRegexp(ValueError, msg): - create_cube_with_percentiles(percentiles, cube, cube_data) - - def test_percentile_points(self): - """ - Test that the plugin returns an Iris.cube.Cube - with a percentile coordinate with the desired points. - """ - cube = self.current_temperature_forecast_cube - cube_data = self.cube_data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles(percentiles, cube, cube_data) - self.assertIsInstance(result.coord("percentile"), DimCoord) - self.assertArrayAlmostEqual( - result.coord("percentile").points, percentiles) - - def test_spot_forecasts_percentile_points(self): - """ - Test that the plugin returns a Cube with a percentile dimension - coordinate and that the percentile dimension has the expected points - for an input spot forecast. 
- """ - cube = self.current_temperature_spot_forecast_cube - cube_data = self.cube_spot_data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertIsInstance(result, Cube) - self.assertIsInstance(result.coord("percentile"), DimCoord) - self.assertArrayAlmostEqual( - result.coord("percentile").points, percentiles) - - def test_percentile_length_too_short(self): - """ - Test that the plugin raises the default ValueError, if the number - of percentiles is fewer than the length of the zeroth dimension within - the cube. - """ - cube = self.current_temperature_forecast_cube - cube_data = self.cube_data + 2 - percentiles = [0.1, 0.5] - msg = "Unequal lengths" - with self.assertRaisesRegexp(ValueError, msg): - create_cube_with_percentiles( - percentiles, cube, cube_data) - - def test_percentile_length_too_long(self): - """ - Test that the plugin raises the default ValueError, if the number - of percentiles exceeds the length of the zeroth dimension within - the cube. - """ - cube = self.current_temperature_forecast_cube - cube = cube[0, :, :, :] - cube_data = self.cube_data + 2 - percentiles = [0.1, 0.5, 0.9] - msg = "Unequal lengths" - with self.assertRaisesRegexp(ValueError, msg): - create_cube_with_percentiles( - percentiles, cube, cube_data) - - def test_metadata_copy(self): - """ - Test that the metadata dictionaries within the input cube, are - also present on the output cube. - """ - cube = self.current_temperature_forecast_cube - cube.attributes = {"source": "ukv"} - cube_data = self.cube_data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - self.assertDictEqual( - cube.metadata._asdict(), result.metadata._asdict()) - - def test_coordinate_copy(self): - """ - Test that the coordinates within the input cube, are - also present on the output cube. - """ - cube = self.current_temperature_forecast_cube - cube.attributes = {"source": "ukv"} - cube_data = self.cube_data + 2 - percentiles = [0.1, 0.5, 0.9] - result = create_cube_with_percentiles( - percentiles, cube, cube_data) - for coord in cube.coords(): - if coord not in result.coords(): - msg = ( - "Coordinate: {} not found in cube {}".format( - coord, result)) - raise CoordinateNotFoundError(msg) - - -class Test_create_percentiles(IrisTest): - - """Test the create_percentiles plugin.""" - - def test_basic(self): - """ - Test that the plugin returns a list with the expected number of - percentiles. - """ - no_of_percentiles = 3 - result = create_percentiles(no_of_percentiles) - self.assertIsInstance(result, list) - self.assertEqual(len(result), no_of_percentiles) - - def test_data(self): - """ - Test that the plugin returns a list with the expected data values - for the percentiles. - """ - data = np.array([0.25, 0.5, 0.75]) - no_of_percentiles = 3 - result = create_percentiles(no_of_percentiles) - self.assertArrayAlmostEqual(result, data) - - def test_random(self): - """ - Test that the plugin returns a list with the expected number of - percentiles, if the random sampling option is selected. - """ - no_of_percentiles = 3 - result = create_percentiles(no_of_percentiles, sampling="random") - self.assertIsInstance(result, list) - self.assertEqual(len(result), no_of_percentiles) - - def test_unknown_sampling_option(self): - """ - Test that the plugin returns the expected error message, - if an unknown sampling option is selected. 
- """ - no_of_percentiles = 3 - msg = "The unknown sampling option is not yet implemented" - with self.assertRaisesRegexp(ValueError, msg): - create_percentiles(no_of_percentiles, sampling="unknown") - - -class Test_get_bounds_of_distribution(IrisTest): - - """Test the get_bounds_of_distribution plugin.""" - - def setUp(self): - self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( - set_up_probability_above_threshold_temperature_cube())) - - def test_basic(self): - """Test that the result is a numpy array.""" - cube = self.current_temperature_forecast_cube - cube_units = cube.coord("probability_above_threshold").units - result = get_bounds_of_distribution(cube.name(), cube_units) - self.assertIsInstance(result, np.ndarray) - - def test_check_data(self): - """ - Test that the expected results are returned for the bounds_pairing. - """ - cube = self.current_temperature_forecast_cube - cube_units = cube.coord("probability_above_threshold").units - bounds_pairing = (-40, 50) - result = ( - get_bounds_of_distribution(cube.name(), cube_units)) - self.assertArrayAlmostEqual(result, bounds_pairing) - - def test_check_unit_conversion(self): - """ - Test that the expected results are returned for the bounds_pairing, - if the units of the bounds_pairings need to be converted to match - the units of the forecast. - """ - cube = self.current_temperature_forecast_cube - cube.coord("probability_above_threshold").convert_units("fahrenheit") - cube_units = cube.coord("probability_above_threshold").units - bounds_pairing = (-40, 122) # In fahrenheit - result = ( - get_bounds_of_distribution(cube.name(), cube_units)) - self.assertArrayAlmostEqual(result, bounds_pairing) - - def test_check_exception_is_raised(self): - """ - Test that the expected results are returned for the bounds_pairing. - """ - cube = self.current_temperature_forecast_cube - cube.standard_name = None - cube.long_name = "Nonsense" - cube_units = cube.coord("probability_above_threshold").units - msg = "The forecast_cube name" - with self.assertRaisesRegexp(KeyError, msg): - get_bounds_of_distribution(cube.name(), cube_units) + percentile_cube.data, percentile_cube, "percentile", plen)) + self.assertArrayAlmostEqual(reshaped_array, expected) + + #def test_percentile_is_not_a_dimension_coordinate(self): + #""" + #Test the array size, if the percentile coordinate is not a dimension + #coordinate on the cube. + #""" + #cube = self.current_temperature_forecast_cube + #for cube_slice in cube.slices_over("percentile"): + #break + #input_array = cube_slice.data + #plen = len(cube_slice.coord("percentile").points) + #reshaped_array = ( + #reshape_array_to_have_probabilistic_dimension_at_the_front( + #cube_slice.data, cube_slice, "percentile", plen)) + #self.assertEqual(reshaped_array.shape[0], plen) + #self.assertEqual(reshaped_array.shape, (1, 1, 3, 3)) + + #def test_missing_coordinate(self): + #""" + #Basic test that the result is a numpy array with the expected contents. 
+ #""" + #cube = self.current_temperature_forecast_cube + #input_array = cube.data + #plen = len(cube.coord("percentile").points) + #msg = "coordinate is not available" + #with self.assertRaisesRegexp(CoordinateNotFoundError, msg): + #reshape_array_to_have_probabilistic_dimension_at_the_front( + #cube.data, cube, "nonsense", plen) if __name__ == '__main__': diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index d4483ceaa8..5e85fb7ddd 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -180,6 +180,51 @@ def test_simple_check_data(self): cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, expected) + def test_check_data_multiple_timesteps(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles. + """ + expected = np.array([[[[8., 8.], + [-8., 8.66666667]], + [[8., -16.], + [8., -16.]]], + [[[12., 12.], + [12., 12.]], + [[10.5, 10.], + [10.5, 10.]]], + [[[31., 31.], + [31., 31.]], + [[11.5, 11.33333333], + [11.5, 12.]]]]) + + data = np.array([[[[0.8, 0.8], + [0.7, 0.9]], + [[0.8, 0.6], + [0.8, 0.6]]], + [[[0.6, 0.6], + [0.6, 0.6]], + [[0.5, 0.4], + [0.5, 0.4]]], + [[[0.4, 0.4], + [0.4, 0.4]], + [[0.1, 0.1], + [0.1, 0.2]]]]) + + cube = set_up_probability_above_threshold_cube( + data, "air_temperature", "degreesC", timesteps=2, + x_dimension_length=2, y_dimension_length=2) + self.probability_cube = ( + _add_forecast_reference_time_and_forecast_period( + cube, time_point=np.array([402295.0, 402296.0]))) + cube = self.probability_cube + percentiles = [0.2, 0.6, 0.8] + bounds_pairing = (-40, 50) + plugin = Plugin() + result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, expected) + def test_probabilities_not_monotonically_increasing(self): """ Test that the plugin raises a ValueError when the probabilities @@ -223,15 +268,15 @@ def test_check_data(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles. """ - data = np.array([[[[15.8, 31., 46.2], - [8., 10., 31.], - [10.4, 12., 42.4]]], - [[[-16., 10, 31.], - [8., 10., 11.6], - [-30.4, 8., 12.]]], - [[[-30.4, 8., 11.], - [-34., -10., 9], - [-35.2, -16., 3.2]]]]) + data = np.array([[[[15.8, 8., 10.4], + [-16., 8., -30.4], + [-30.4, -34., -35.2]]], + [[[31., 10., 12.], + [10., 10., 8.], + [8., -10., -16.]]], + [[[46.2, 31., 42.4], + [31., 11.6, 12.], + [11., 9., 3.2]]]]) cube = self.current_temperature_forecast_cube percentiles = [0.1, 0.5, 0.9] @@ -247,15 +292,15 @@ def test_check_single_threshold(self): data values for the percentiles, if a single threshold is used for constructing the percentiles. 
""" - data = np.array([[[[12.2, 29., 45.8], - [8., 26.66666667, 45.33333333], - [12.2, 29., 45.8]]], - [[[-16., 23.75, 44.75], - [8., 26.66666667, 45.33333333], - [-30.4, 8., 41.6]]], - [[[-30.4, 8., 41.6], - [-34., -10., 29.], - [-35.2, -16., 3.2]]]]) + data = np.array([[[[12.2, 8., 12.2], + [-16., 8., -30.4], + [-30.4, -34., -35.2]]], + [[[29., 26.66666667, 29.], + [23.75, 26.66666667, 8.], + [8., -10., -16.]]], + [[[45.8, 45.33333333, 45.8], + [44.75, 45.33333333, 41.6], + [41.6, 29., 3.2]]]]) for acube in self.current_temperature_forecast_cube.slices_over( "probability_above_threshold"): @@ -276,15 +321,15 @@ def test_lots_of_probability_thresholds(self): input_probs_1d = np.linspace(1, 0, 30) input_probs = np.tile(input_probs_1d, (3, 3, 1, 1)).T - data = np.array([[[[2.9, 14.5, 26.1], - [2.9, 14.5, 26.1], - [2.9, 14.5, 26.1]]], - [[[2.9, 14.5, 26.1], - [2.9, 14.5, 26.1], - [2.9, 14.5, 26.1]]], - [[[2.9, 14.5, 26.1], - [2.9, 14.5, 26.1], - [2.9, 14.5, 26.1]]]]) + data = np.array([[[[2.9, 2.9, 2.9], + [2.9, 2.9, 2.9], + [2.9, 2.9, 2.9]]], + [[[14.5, 14.5, 14.5], + [14.5, 14.5, 14.5], + [14.5, 14.5, 14.5]]], + [[[26.1, 26.1, 26.1], + [26.1, 26.1, 26.1], + [26.1, 26.1, 26.1]]]]) temperature_values = np.arange(0, 30) cube = ( @@ -305,65 +350,39 @@ def test_lots_of_percentiles(self): data values for the percentiles, if lots of percentile values are requested. """ - data = np.array([[[[13.9, 15.8, 17.7], - [19.6, 21.5, 23.4], - [25.3, 27.2, 29.1]]], - [[[31., 32.9, 34.8], - [36.7, 38.6, 40.5], - [42.4, 44.3, 46.2]]], - [[[48.1, -16., 8.], - [8.25, 8.5, 8.75], - [9., 9.25, 9.5]]], - [[[9.75, 10., 10.33333333], - [10.66666667, 11., 11.33333333], - [11.66666667, 12., 21.5]]], - [[[31., 40.5, 10.2], - [10.4, 10.6, 10.8], - [11., 11.2, 11.4]]], - [[[11.6, 11.8, 12.], - [15.8, 19.6, 23.4], - [27.2, 31., 34.8]]], - [[[38.6, 42.4, 46.2], - [-28., -16., -4.], - [8., 8.33333333, 8.66666667]]], - [[[9., 9.33333333, 9.66666667], - [10., 10.33333333, 10.66666667], - [11., 11.33333333, 11.66666667]]], - [[[12., 21.5, 31.], - [40.5, -16., 8.], - [8.25, 8.5, 8.75]]], - [[[9., 9.25, 9.5], - [9.75, 10., 10.2], - [10.4, 10.6, 10.8]]], - [[[11., 11.2, 11.4], - [11.6, 11.8, -35.2], - [-30.4, -25.6, -20.8]]], - [[[-16., -11.2, -6.4], - [-1.6, 3.2, 8.], - [8.5, 9., 9.5]]], - [[[10., 10.5, 11.], - [11.5, 12., 31.], - [-35.2, -30.4, -25.6]]], - [[[-20.8, -16., -11.2], - [-6.4, -1.6, 3.2], - [8., 8.33333333, 8.66666667]]], - [[[9., 9.33333333, 9.66666667], - [10., 10.5, 11.], - [11.5, -37., -34.]]], - [[[-31., -28., -25.], - [-22., -19., -16.], - [-13., -10., -7.]]], - [[[-4., -1., 2.], - [5., 8., 8.5], - [9., 9.5, -37.6]]], - [[[-35.2, -32.8, -30.4], - [-28., -25.6, -23.2], - [-20.8, -18.4, -16.]]], - [[[-13.6, -11.2, -8.8], - [-6.4, -4., -1.6], - [0.8, 3.2, 5.6]]]]) + data = np.array([[[[13.9, -16., 10.2], + [-28., -16., -35.2], + [-35.2, -37., -37.6]]], + [[[17.7, 8.25, 10.6], + [-4., 8.25, -25.6], + [-25.6, -31., -32.8]]], + [[[21.5, 8.75, 11.], + [8.33333333, 8.75, -16.], + [-16., -25., -28.]]], + [[[25.3, 9.25, 11.4], + [9., 9.25, -6.4], + [-6.4, -19., -23.2]]], + [[[29.1, 9.75, 11.8], + [9.66666667, 9.75, 3.2], + [3.2, -13., -18.4]]], + [[[32.9, 10.33333333, 15.8], + [10.33333333, 10.2, 8.5], + [8.33333333, -7., -13.6]]], + [[[36.7, 11., 23.4], + [11., 10.6, 9.5], + [9., -1., -8.8]]], + [[[40.5, 11.66666667, 31.], + [11.66666667, 11., 10.5], + [9.66666667, 5., -4.]]], + [[[44.3, 21.5, 38.6], + [21.5, 11.4, 11.5], + [10.5, 8.5, 0.8]]], + [[[48.1, 40.5, 46.2], + [40.5, 11.8, 31.], + [11.5, 9.5, 5.6]]]]) + cube = 
self.current_temperature_forecast_cube - percentiles = np.arange(0.05, 1.0, 0.05) + percentiles = np.arange(0.05, 1.0, 0.1) bounds_pairing = (-40, 50) plugin = Plugin() result = plugin._probabilities_to_percentiles( @@ -375,15 +394,16 @@ def test_check_data_spot_forecasts(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles for spot forecasts. """ - data = np.array([[[15.8, 31., 46.2, - 8., 10., 31., - 10.4, 12., 42.4]], - [[-16., 10, 31., - 8., 10., 11.6, - -30.4, 8., 12.]], - [[-30.4, 8., 11., - -34., -10., 9, - -35.2, -16., 3.2]]]) + data = np.array([[[15.8, 8., 10.4, + -16., 8., -30.4, + -30.4, -34., -35.2]], + [[31., 10., 12., + 10., 10., 8., + 8., -10., -16.]], + [[46.2, 31., 42.4, + 31., 11.6, 12., + 11., 9., 3.2]]]) + cube = self.current_temperature_spot_forecast_cube percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) @@ -411,15 +431,15 @@ def test_check_data_specifying_percentiles(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for a specific number of percentiles. """ - data = np.array([[[[21.5, 31., 40.5], - [8.75, 10., 11.66666667], - [11., 12., 31.]]], - [[[8.33333333, 10., 11.66666667], - [8.75, 10., 11.], - [-16., 8., 10.5]]], - [[[-16., 8., 9.66666667], - [-25., -10., 5.], - [-28., -16., -4.]]]]) + data = np.array([[[[21.5, 8.75, 11.], + [8.33333333, 8.75, -16.], + [-16., -25., -28.]]], + [[[31., 10., 12.], + [10., 10., 8.], + [ 8., -10., -16.]]], + [[[40.5, 11.66666667, 31.], + [11.66666667, 11., 10.5], + [9.66666667, 5., -4.]]]]) cube = self.current_temperature_forecast_cube percentiles = [0.1, 0.5, 0.9] @@ -433,15 +453,15 @@ def test_check_data_not_specifying_percentiles(self): Test that the plugin returns an Iris.cube.Cube with the expected data values without specifying the number of percentiles. 
""" - data = np.array([[[[21.5, 31., 40.5], - [8.75, 10., 11.66666667], - [11., 12., 31.]]], - [[[8.33333333, 10., 11.66666667], - [8.75, 10., 11.], - [-16., 8., 10.5]]], - [[[-16., 8., 9.66666667], - [-25., -10., 5.], - [-28., -16., -4.]]]]) + data = np.array([[[[21.5, 8.75, 11.], + [8.33333333, 8.75, -16.], + [-16., -25., -28.]]], + [[[31., 10., 12.], + [10., 10., 8.], + [ 8., -10., -16.]]], + [[[40.5, 11.66666667, 31.], + [11.66666667, 11., 10.5], + [9.66666667, 5., -4.]]]]) cube = self.current_temperature_forecast_cube plugin = Plugin() diff --git a/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py index 586ccdda1d..e0499c95ba 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py @@ -77,6 +77,8 @@ def test_number_of_members(self): plugin = Plugin() result = plugin.process(cube) self.assertEqual(len(result.coord("realization").points), plen) + self.assertArrayAlmostEqual( + result.coord("realization").points, np.array([0, 1, 2])) def test_no_percentile_coord(self): """""" diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index 1ce8b9b9d0..1bb831f2aa 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -213,15 +213,15 @@ def test_check_data(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles. """ - data = np.array([[[[4.5, 6.5, 7.5], - [5.125, 7.125, 8.125], - [5.75, 7.75, 8.75]]], - [[[6.375, 8.375, 9.375], - [7., 9., 10.], - [7.625, 9.625, 10.625]]], - [[[8.25, 10.25, 11.25], - [8.875, 10.875, 11.875], - [9.5, 11.5, 12.5]]]]) + data = np.array([[[[4.5, 5.125, 5.75], + [6.375, 7., 7.625], + [8.25 , 8.875, 9.5]]], + [[[6.5 , 7.125, 7.75], + [8.375, 9., 9.625], + [10.25 , 10.875, 11.5]]], + [[[7.5 , 8.125, 8.75], + [9.375, 10., 10.625], + [11.25 , 11.875, 12.5]]]]) cube = self.percentile_cube percentiles = [0.2, 0.6, 0.8] @@ -231,21 +231,62 @@ def test_check_data(self): cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) + def test_check_data_multiple_timesteps(self): + """ + Test that the plugin returns an Iris.cube.Cube with the expected + data values for the percentiles. 
+ """ + expected = np.array([[[[4.5, 5.21428571], + [5.92857143, 6.64285714]], + [[7.35714286, 8.07142857], + [8.78571429, 9.5]]], + [[[6.5, 7.21428571], + [7.92857143, 8.64285714]], + [[9.35714286, 10.07142857], + [10.78571429, 11.5]]], + [[[7.5, 8.21428571], + [8.92857143, 9.64285714]], + [[10.35714286, 11.07142857], + [11.78571429, 12.5]]]]) + + data = np.tile(np.linspace(5, 10, 8), 3).reshape(3, 2, 2, 2) + data[0] -= 1 + data[1] += 1 + data[2] += 3 + print "data = ", data + cube = set_up_cube(data, "air_temperature", "degreesC", + timesteps=2, x_dimension_length=2, + y_dimension_length=2) + cube.coord("realization").rename("percentile") + cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) + self.percentile_cube = ( + _add_forecast_reference_time_and_forecast_period( + cube, time_point=np.array([402295.0, 402296.0]))) + cube = self.percentile_cube + percentiles = [0.2, 0.6, 0.8] + bounds_pairing = (-40, 50) + plugin = Plugin() + print "cube.data = ", cube.data + result = plugin._sample_percentiles( + cube, percentiles, bounds_pairing) + print "result.data = ", repr(result.data) + self.assertArrayAlmostEqual(result.data, expected) + def test_check_single_threshold(self): """ Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles, if a single percentile is used within the input set of percentiles. """ - expected = np.array([[[[4., 24.44444444, 44.88888889], - [4.625, 24.79166667, 44.95833333], - [5.25, 25.13888889, 45.02777778]]], - [[[5.875, 25.48611111, 45.09722222], - [6.5, 25.83333333, 45.16666667], - [7.125, 26.18055556, 45.23611111]]], - [[[7.75, 26.52777778, 45.30555556], - [8.375, 26.875, 45.375], - [9., 27.22222222, 45.44444444]]]]) + expected = np.array([[[[4., 4.625, 5.25], + [5.875, 6.5, 7.125], + [7.75, 8.375, 9.]]], + [[[24.44444444, 24.79166667, 25.13888889], + [25.48611111, 25.83333333, 26.18055556], + [26.52777778, 26.875, 27.22222222]]], + [[[44.88888889, 44.95833333, 45.02777778], + [45.09722222, 45.16666667, 45.23611111], + [45.30555556, 45.375, 45.44444444]]]]) data = np.array([8]) data = data[:, np.newaxis, np.newaxis, np.newaxis] @@ -268,6 +309,7 @@ def test_check_single_threshold(self): plugin = Plugin() result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) + self.assertArrayAlmostEqual(result.data, expected) def test_lots_of_input_percentiles(self): @@ -279,15 +321,15 @@ def test_lots_of_input_percentiles(self): input_forecast_values = ( np.tile(input_forecast_values_1d, (3, 3, 1, 1)).T) - data = np.array([[[[11., 15., 19.], - [11., 15., 19.], - [11., 15., 19.]]], - [[[11., 15., 19.], - [11., 15., 19.], - [11., 15., 19.]]], - [[[11., 15., 19.], - [11., 15., 19.], - [11., 15., 19.]]]]) + data = np.array([[[[11., 11., 11.], + [11., 11., 11.], + [11., 11., 11.]]], + [[[15., 15., 15.], + [15., 15., 15.], + [15., 15., 15.]]], + [[[19., 19., 19.], + [19., 19., 19.], + [19., 19., 19.]]]]) percentiles_values = np.linspace(0, 1, 30) cube = ( @@ -309,36 +351,37 @@ def test_lots_of_percentiles(self): data values for the percentiles, if lots of percentile values are requested. 
""" - data = np.array([[[[-18., 4.25, 4.75], - [5.25, 5.75, 6.25], - [6.75, 7.25, 7.75]]], - [[[29., -17.6875, 4.875], - [5.375, 5.875, 6.375], - [6.875, 7.375, 7.875]]], - [[[8.375, 29.3125, -17.375], - [5.5, 6., 6.5], - [7., 7.5, 8.]]], - [[[8.5, 9., 29.625], - [-17.0625, 6.125, 6.625], - [7.125, 7.625, 8.125]]], - [[[8.625, 9.125, 9.625], - [29.9375, -16.75, 6.75], - [7.25, 7.75, 8.25]]], - [[[8.75, 9.25, 9.75], - [10.25, 30.25, -16.4375], - [7.375, 7.875, 8.375]]], - [[[8.875, 9.375, 9.875], - [10.375, 10.875, 30.5625], - [-16.125, 8., 8.5]]], - [[[9., 9.5, 10.], - [10.5, 11., 11.5], - [30.875, -15.8125, 8.625]]], - [[[9.125, 9.625, 10.125], - [10.625, 11.125, 11.625], - [12.125, 31.1875, -15.5]]], - [[[9.25, 9.75, 10.25], - [10.75, 11.25, 11.75], - [12.25, 12.75, 31.5]]]]) + data = np.array([[[[-18., -17.6875, -17.375], + [-17.0625, -16.75, -16.4375], + [-16.125, -15.8125, -15.5]]], + [[[4.25, 4.875, 5.5], + [6.125, 6.75, 7.375], + [8., 8.625, 9.25]]], + [[[4.75, 5.375, 6.], + [6.625, 7.25, 7.875], + [8.5, 9.125, 9.75]]], + [[[5.25, 5.875, 6.5], + [7.125, 7.75, 8.375], + [9., 9.625, 10.25]]], + [[[5.75, 6.375, 7.], + [7.625, 8.25, 8.875], + [9.5, 10.125, 10.75]]], + [[[6.25, 6.875, 7.5], + [8.125, 8.75, 9.375], + [10., 10.625, 11.25]]], + [[[6.75, 7.375, 8.], + [8.625, 9.25, 9.875], + [10.5, 11.125, 11.75]]], + [[[7.25, 7.875, 8.5], + [9.125, 9.75, 10.375], + [11., 11.625, 12.25]]], + [[[7.75, 8.375, 9.], + [9.625, 10.25, 10.875], + [11.5, 12.125, 12.75]]], + [[[29., 29.3125, 29.625], + [29.9375, 30.25, 30.5625], + [30.875, 31.1875, 31.5]]]]) + cube = self.percentile_cube percentiles = np.arange(0.05, 1.0, 0.1) bounds_pairing = (-40, 50) @@ -352,9 +395,9 @@ def test_check_data_spot_forecasts(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for the percentiles for spot forecasts. """ - data = np.array([[[5., 5., 5., 7.5, 7.5, 7.5, 10., 10., 10.]], - [[5., 5., 5., 7.5, 7.5, 7.5, 10., 10., 10.]], - [[5., 5., 5., 7.5, 7.5, 7.5, 10., 10., 10.]]]) + data = np.array([[[5., 7.5, 10., 5., 7.5, 10., 5., 7.5, 10.]], + [[5., 7.5, 10., 5., 7.5, 10., 5., 7.5, 10.]], + [[5., 7.5, 10., 5., 7.5, 10., 5., 7.5, 10.]]]) cube = self.spot_percentile_cube percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) @@ -384,20 +427,21 @@ def test_check_data_specifying_percentiles(self): Test that the plugin returns an Iris.cube.Cube with the expected data values for a specific number of percentiles. """ - data = np.array([[[[4.75, 6., 7.25], - [5.375, 6.625, 7.875], - [6., 7.25, 8.5]]], - [[[6.625, 7.875, 9.125], - [7.25, 8.5, 9.75], - [7.875, 9.125, 10.375]]], - [[[8.5, 9.75, 11.], - [9.125, 10.375, 11.625], - [9.75, 11., 12.25]]]]) + data = np.array([[[[4.75, 5.375, 6.], + [6.625, 7.25, 7.875], + [8.5, 9.125, 9.75]]], + [[[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]]], + [[[7.25, 7.875, 8.5], + [9.125, 9.75, 10.375], + [11., 11.625, 12.25]]]]) cube = self.percentile_cube percentiles = [0.25, 0.5, 0.75] plugin = Plugin() result = plugin.process(cube, no_of_percentiles=len(percentiles)) + print "result.data = ", repr(result.data) self.assertArrayAlmostEqual(result.data, data) def test_check_data_not_specifying_percentiles(self): @@ -405,15 +449,15 @@ def test_check_data_not_specifying_percentiles(self): Test that the plugin returns an Iris.cube.Cube with the expected data values without specifying the number of percentiles. 
""" - data = np.array([[[[4.75, 6., 7.25], - [5.375, 6.625, 7.875], - [6., 7.25, 8.5]]], - [[[6.625, 7.875, 9.125], - [7.25, 8.5, 9.75], - [7.875, 9.125, 10.375]]], - [[[8.5, 9.75, 11.], - [9.125, 10.375, 11.625], - [9.75, 11., 12.25]]]]) + data = np.array([[[[4.75, 5.375, 6.], + [6.625, 7.25, 7.875], + [8.5, 9.125, 9.75]]], + [[[6., 6.625, 7.25], + [7.875, 8.5, 9.125], + [9.75, 10.375, 11.]]], + [[[7.25, 7.875, 8.5], + [9.125, 9.75, 10.375], + [11., 11.625, 12.25]]]]) cube = self.percentile_cube plugin = Plugin() From 2491eb8254f8a5703c08711dfba1add52bb27954 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 23 May 2017 15:03:00 +0100 Subject: [PATCH 0117/1367] Uncommented unit tests, and made one more unit test work successfully. A few unit tests still require edits. --- ...oupling_EnsembleCopulaCouplingUtilities.py | 776 +++++++++--------- ..._GeneratePercentilesFromMeanAndVariance.py | 451 +++++----- 2 files changed, 614 insertions(+), 613 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 7a3c7dc2f4..f12f771131 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -55,342 +55,342 @@ set_up_probability_above_threshold_temperature_cube) -#class Test_concatenate_2d_array_with_2d_array_endpoints(IrisTest): - - #"""Test the concatenate_2d_array_with_2d_array_endpoints.""" - - #def test_basic(self): - #""" - #Basic test that the result is a numpy array with the expected contents. - #""" - #expected = np.array([[0, 0.2, 0.5, 0.8, 1]]) - #percentiles = np.array([[0.2, 0.5, 0.8]]) - #result = concatenate_2d_array_with_2d_array_endpoints( - #percentiles, 0, 1) - #self.assertIsInstance(result, np.ndarray) - #self.assertArrayAlmostEqual(result, expected) - - #def test_another_example(self): - #""" - #Another basic test that the result is a numpy array with the - #expected contents. - #""" - #expected = np.array( - # [[-100, -40, 200, 1000, 10000], [-100, -40, 200, 1000, 10000]]) - #percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) - #result = concatenate_2d_array_with_2d_array_endpoints( - #percentiles, -100, 10000) - #self.assertIsInstance(result, np.ndarray) - #self.assertArrayAlmostEqual(result, expected) - - #def test_1d_input(self): - #""" - #Test that a 1d input array results in the expected error. - #""" - #expected = np.array([-100, -40, 200, 1000, 10000]) - #percentiles = np.array([-40, 200, 1000]) - #msg = "all the input arrays must have same number of dimensions" - #with self.assertRaisesRegexp(ValueError, msg): - #concatenate_2d_array_with_2d_array_endpoints( - #percentiles, -100, 10000) - - #def test_3d_input(self): - #""" - #Test that a 3d input array results in the expected error. 
- #""" - #expected = np.array([[[-100, -40, 200, 1000, 10000]]]) - #percentiles = np.array([[[-40, 200, 1000]]]) - #msg = "all the input arrays must have same number of dimensions" - #with self.assertRaisesRegexp(ValueError, msg): - #concatenate_2d_array_with_2d_array_endpoints( - #percentiles, -100, 10000) - - -#class Test_create_cube_with_percentiles(IrisTest): - - #"""Test the _create_cube_with_percentiles plugin.""" - - #def setUp(self): - #"""Set up temperature cube.""" - #current_temperature_forecast_cube = ( - #_add_forecast_reference_time_and_forecast_period( - #set_up_temperature_cube())) - - #self.cube_data = current_temperature_forecast_cube.data - - #current_temperature_spot_forecast_cube = ( - #_add_forecast_reference_time_and_forecast_period( - #set_up_spot_temperature_cube())) - #self.cube_spot_data = ( - #current_temperature_spot_forecast_cube.data) - - #for cube in current_temperature_forecast_cube.slices_over( - #"realization"): - #cube.remove_coord("realization") - #break - #self.current_temperature_forecast_cube = cube - - #for cube in current_temperature_spot_forecast_cube.slices_over( - #"realization"): - #cube.remove_coord("realization") - #break - #self.current_temperature_spot_forecast_cube = cube - - #def test_basic(self): - #"""Test that the plugin returns an Iris.cube.Cube.""" - #cube = self.current_temperature_forecast_cube - #cube_data = self.cube_data + 2 - #percentiles = [0.1, 0.5, 0.9] - #result = create_cube_with_percentiles( - #percentiles, cube, cube_data) - #self.assertIsInstance(result, Cube) - - #def test_many_percentiles(self): - #""" - #Test that the plugin returns an Iris.cube.Cube with many - #percentiles. - #""" - #cube = self.current_temperature_forecast_cube - #percentiles = np.linspace(0, 1, 100) - #cube_data = np.zeros( - #[len(percentiles), len(cube.coord("time").points), - #len(cube.coord("latitude").points), - #len(cube.coord("longitude").points)]) - #result = create_cube_with_percentiles( - #percentiles, cube, cube_data) - #self.assertEqual(cube_data.shape, result.data.shape) - - #def test_incompatible_percentiles(self): - #""" - #Test that the plugin fails if the percentile values requested - #are not numbers. - #""" - #cube = self.current_temperature_forecast_cube - #percentiles = ["cat", "dog", "elephant"] - #cube_data = np.zeros( - #[len(percentiles), len(cube.coord("time").points), - #len(cube.coord("latitude").points), - #len(cube.coord("longitude").points)]) - #msg = "could not convert string to float" - #with self.assertRaisesRegexp(ValueError, msg): - #create_cube_with_percentiles(percentiles, cube, cube_data) - - #def test_percentile_points(self): - #""" - #Test that the plugin returns an Iris.cube.Cube - #with a percentile coordinate with the desired points. - #""" - #cube = self.current_temperature_forecast_cube - #cube_data = self.cube_data + 2 - #percentiles = [0.1, 0.5, 0.9] - #result = create_cube_with_percentiles(percentiles, cube, cube_data) - #self.assertIsInstance(result.coord("percentile"), DimCoord) - #self.assertArrayAlmostEqual( - #result.coord("percentile").points, percentiles) - - #def test_spot_forecasts_percentile_points(self): - #""" - #Test that the plugin returns a Cube with a percentile dimension - #coordinate and that the percentile dimension has the expected points - #for an input spot forecast. 
- #""" - #cube = self.current_temperature_spot_forecast_cube - #cube_data = self.cube_spot_data + 2 - #percentiles = [0.1, 0.5, 0.9] - #result = create_cube_with_percentiles( - #percentiles, cube, cube_data) - #self.assertIsInstance(result, Cube) - #self.assertIsInstance(result.coord("percentile"), DimCoord) - #self.assertArrayAlmostEqual( - #result.coord("percentile").points, percentiles) - - #def test_percentile_length_too_short(self): - #""" - #Test that the plugin raises the default ValueError, if the number - #of percentiles is fewer than the length of the zeroth dimension within - #the cube. - #""" - #cube = self.current_temperature_forecast_cube - #cube_data = self.cube_data + 2 - #percentiles = [0.1, 0.5] - #msg = "Unequal lengths" - #with self.assertRaisesRegexp(ValueError, msg): - #create_cube_with_percentiles( - #percentiles, cube, cube_data) - - #def test_percentile_length_too_long(self): - #""" - #Test that the plugin raises the default ValueError, if the number - #of percentiles exceeds the length of the zeroth dimension within - #the cube. - #""" - #cube = self.current_temperature_forecast_cube - #cube = cube[0, :, :, :] - #cube_data = self.cube_data + 2 - #percentiles = [0.1, 0.5, 0.9] - #msg = "Unequal lengths" - #with self.assertRaisesRegexp(ValueError, msg): - #create_cube_with_percentiles( - #percentiles, cube, cube_data) - - #def test_metadata_copy(self): - #""" - #Test that the metadata dictionaries within the input cube, are - #also present on the output cube. - #""" - #cube = self.current_temperature_forecast_cube - #cube.attributes = {"source": "ukv"} - #cube_data = self.cube_data + 2 - #percentiles = [0.1, 0.5, 0.9] - #result = create_cube_with_percentiles( - #percentiles, cube, cube_data) - #self.assertDictEqual( - #cube.metadata._asdict(), result.metadata._asdict()) - - #def test_coordinate_copy(self): - #""" - #Test that the coordinates within the input cube, are - #also present on the output cube. - #""" - #cube = self.current_temperature_forecast_cube - #cube.attributes = {"source": "ukv"} - #cube_data = self.cube_data + 2 - #percentiles = [0.1, 0.5, 0.9] - #result = create_cube_with_percentiles( - #percentiles, cube, cube_data) - #for coord in cube.coords(): - #if coord not in result.coords(): - #msg = ( - #"Coordinate: {} not found in cube {}".format( - #coord, result)) - #raise CoordinateNotFoundError(msg) - - -#class Test_create_percentiles(IrisTest): - - #"""Test the create_percentiles plugin.""" - - #def test_basic(self): - #""" - #Test that the plugin returns a list with the expected number of - #percentiles. - #""" - #no_of_percentiles = 3 - #result = create_percentiles(no_of_percentiles) - #self.assertIsInstance(result, list) - #self.assertEqual(len(result), no_of_percentiles) - - #def test_data(self): - #""" - #Test that the plugin returns a list with the expected data values - #for the percentiles. - #""" - #data = np.array([0.25, 0.5, 0.75]) - #no_of_percentiles = 3 - #result = create_percentiles(no_of_percentiles) - #self.assertArrayAlmostEqual(result, data) - - #def test_random(self): - #""" - #Test that the plugin returns a list with the expected number of - #percentiles, if the random sampling option is selected. 
- #""" - #no_of_percentiles = 3 - #result = create_percentiles(no_of_percentiles, sampling="random") - #self.assertIsInstance(result, list) - #self.assertEqual(len(result), no_of_percentiles) - - #def test_unknown_sampling_option(self): - #""" - #Test that the plugin returns the expected error message, - #if an unknown sampling option is selected. - #""" - #no_of_percentiles = 3 - #msg = "The unknown sampling option is not yet implemented" - #with self.assertRaisesRegexp(ValueError, msg): - #create_percentiles(no_of_percentiles, sampling="unknown") - - -#class Test_get_bounds_of_distribution(IrisTest): - - #"""Test the get_bounds_of_distribution plugin.""" - - #def setUp(self): - #self.current_temperature_forecast_cube = ( - #_add_forecast_reference_time_and_forecast_period( - #set_up_probability_above_threshold_temperature_cube())) - - #def test_basic(self): - #"""Test that the result is a numpy array.""" - #cube = self.current_temperature_forecast_cube - #cube_units = cube.coord("probability_above_threshold").units - #result = get_bounds_of_distribution(cube.name(), cube_units) - #self.assertIsInstance(result, np.ndarray) - - #def test_check_data(self): - #""" - #Test that the expected results are returned for the bounds_pairing. - #""" - #cube = self.current_temperature_forecast_cube - #cube_units = cube.coord("probability_above_threshold").units - #bounds_pairing = (-40, 50) - #result = ( - #get_bounds_of_distribution(cube.name(), cube_units)) - #self.assertArrayAlmostEqual(result, bounds_pairing) - - #def test_check_unit_conversion(self): - #""" - #Test that the expected results are returned for the bounds_pairing, - #if the units of the bounds_pairings need to be converted to match - #the units of the forecast. - #""" - #cube = self.current_temperature_forecast_cube - #cube.coord("probability_above_threshold").convert_units("fahrenheit") - #cube_units = cube.coord("probability_above_threshold").units - #bounds_pairing = (-40, 122) # In fahrenheit - #result = ( - #get_bounds_of_distribution(cube.name(), cube_units)) - #self.assertArrayAlmostEqual(result, bounds_pairing) - - #def test_check_exception_is_raised(self): - #""" - #Test that the expected results are returned for the bounds_pairing. - #""" - #cube = self.current_temperature_forecast_cube - #cube.standard_name = None - #cube.long_name = "Nonsense" - #cube_units = cube.coord("probability_above_threshold").units - #msg = "The forecast_cube name" - #with self.assertRaisesRegexp(KeyError, msg): - #get_bounds_of_distribution(cube.name(), cube_units) - - -#class Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest): - - #"""Test the insert_lower_and_upper_endpoint_to_1d_array.""" - - #def test_basic(self): - #""" - #Basic test that the result is a numpy array with the expected contents. - #""" - #expected = [0, 0.2, 0.5, 0.8, 1] - #percentiles = [0.2, 0.5, 0.8] - #result = insert_lower_and_upper_endpoint_to_1d_array( - #percentiles, 0, 1) - #self.assertIsInstance(result, np.ndarray) - #self.assertArrayAlmostEqual(result, expected) - - #def test_another_example(self): - #""" - #Another basic test that the result is a numpy array with the - #expected contents. 
- #""" - #expected = [-100, -40, 200, 1000, 10000] - #percentiles = [-40, 200, 1000] - #result = insert_lower_and_upper_endpoint_to_1d_array( - #percentiles, -100, 10000) - #self.assertIsInstance(result, np.ndarray) - #self.assertArrayAlmostEqual(result, expected) +class Test_concatenate_2d_array_with_2d_array_endpoints(IrisTest): + + """Test the concatenate_2d_array_with_2d_array_endpoints.""" + + def test_basic(self): + """ + Basic test that the result is a numpy array with the expected contents. + """ + expected = np.array([[0, 0.2, 0.5, 0.8, 1]]) + percentiles = np.array([[0.2, 0.5, 0.8]]) + result = concatenate_2d_array_with_2d_array_endpoints( + percentiles, 0, 1) + self.assertIsInstance(result, np.ndarray) + self.assertArrayAlmostEqual(result, expected) + + def test_another_example(self): + """ + Another basic test that the result is a numpy array with the + expected contents. + """ + expected = np.array( + [[-100, -40, 200, 1000, 10000], [-100, -40, 200, 1000, 10000]]) + percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) + result = concatenate_2d_array_with_2d_array_endpoints( + percentiles, -100, 10000) + self.assertIsInstance(result, np.ndarray) + self.assertArrayAlmostEqual(result, expected) + + def test_1d_input(self): + """ + Test that a 1d input array results in the expected error. + """ + expected = np.array([-100, -40, 200, 1000, 10000]) + percentiles = np.array([-40, 200, 1000]) + msg = "all the input arrays must have same number of dimensions" + with self.assertRaisesRegexp(ValueError, msg): + concatenate_2d_array_with_2d_array_endpoints( + percentiles, -100, 10000) + + def test_3d_input(self): + """ + Test that a 3d input array results in the expected error. + """ + expected = np.array([[[-100, -40, 200, 1000, 10000]]]) + percentiles = np.array([[[-40, 200, 1000]]]) + msg = "all the input arrays must have same number of dimensions" + with self.assertRaisesRegexp(ValueError, msg): + concatenate_2d_array_with_2d_array_endpoints( + percentiles, -100, 10000) + + +class Test_create_cube_with_percentiles(IrisTest): + + """Test the _create_cube_with_percentiles plugin.""" + + def setUp(self): + """Set up temperature cube.""" + current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + + self.cube_data = current_temperature_forecast_cube.data + + current_temperature_spot_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_spot_temperature_cube())) + self.cube_spot_data = ( + current_temperature_spot_forecast_cube.data) + + for cube in current_temperature_forecast_cube.slices_over( + "realization"): + cube.remove_coord("realization") + break + self.current_temperature_forecast_cube = cube + + for cube in current_temperature_spot_forecast_cube.slices_over( + "realization"): + cube.remove_coord("realization") + break + self.current_temperature_spot_forecast_cube = cube + + def test_basic(self): + """Test that the plugin returns an Iris.cube.Cube.""" + cube = self.current_temperature_forecast_cube + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertIsInstance(result, Cube) + + def test_many_percentiles(self): + """ + Test that the plugin returns an Iris.cube.Cube with many + percentiles. 
+ """ + cube = self.current_temperature_forecast_cube + percentiles = np.linspace(0, 1, 100) + cube_data = np.zeros( + [len(percentiles), len(cube.coord("time").points), + len(cube.coord("latitude").points), + len(cube.coord("longitude").points)]) + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertEqual(cube_data.shape, result.data.shape) + + def test_incompatible_percentiles(self): + """ + Test that the plugin fails if the percentile values requested + are not numbers. + """ + cube = self.current_temperature_forecast_cube + percentiles = ["cat", "dog", "elephant"] + cube_data = np.zeros( + [len(percentiles), len(cube.coord("time").points), + len(cube.coord("latitude").points), + len(cube.coord("longitude").points)]) + msg = "could not convert string to float" + with self.assertRaisesRegexp(ValueError, msg): + create_cube_with_percentiles(percentiles, cube, cube_data) + + def test_percentile_points(self): + """ + Test that the plugin returns an Iris.cube.Cube + with a percentile coordinate with the desired points. + """ + cube = self.current_temperature_forecast_cube + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles(percentiles, cube, cube_data) + self.assertIsInstance(result.coord("percentile"), DimCoord) + self.assertArrayAlmostEqual( + result.coord("percentile").points, percentiles) + + def test_spot_forecasts_percentile_points(self): + """ + Test that the plugin returns a Cube with a percentile dimension + coordinate and that the percentile dimension has the expected points + for an input spot forecast. + """ + cube = self.current_temperature_spot_forecast_cube + cube_data = self.cube_spot_data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertIsInstance(result, Cube) + self.assertIsInstance(result.coord("percentile"), DimCoord) + self.assertArrayAlmostEqual( + result.coord("percentile").points, percentiles) + + def test_percentile_length_too_short(self): + """ + Test that the plugin raises the default ValueError, if the number + of percentiles is fewer than the length of the zeroth dimension within + the cube. + """ + cube = self.current_temperature_forecast_cube + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5] + msg = "Unequal lengths" + with self.assertRaisesRegexp(ValueError, msg): + create_cube_with_percentiles( + percentiles, cube, cube_data) + + def test_percentile_length_too_long(self): + """ + Test that the plugin raises the default ValueError, if the number + of percentiles exceeds the length of the zeroth dimension within + the cube. + """ + cube = self.current_temperature_forecast_cube + cube = cube[0, :, :, :] + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + msg = "Unequal lengths" + with self.assertRaisesRegexp(ValueError, msg): + create_cube_with_percentiles( + percentiles, cube, cube_data) + + def test_metadata_copy(self): + """ + Test that the metadata dictionaries within the input cube, are + also present on the output cube. + """ + cube = self.current_temperature_forecast_cube + cube.attributes = {"source": "ukv"} + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + self.assertDictEqual( + cube.metadata._asdict(), result.metadata._asdict()) + + def test_coordinate_copy(self): + """ + Test that the coordinates within the input cube, are + also present on the output cube. 
+ """ + cube = self.current_temperature_forecast_cube + cube.attributes = {"source": "ukv"} + cube_data = self.cube_data + 2 + percentiles = [0.1, 0.5, 0.9] + result = create_cube_with_percentiles( + percentiles, cube, cube_data) + for coord in cube.coords(): + if coord not in result.coords(): + msg = ( + "Coordinate: {} not found in cube {}".format( + coord, result)) + raise CoordinateNotFoundError(msg) + + +class Test_create_percentiles(IrisTest): + + """Test the create_percentiles plugin.""" + + def test_basic(self): + """ + Test that the plugin returns a list with the expected number of + percentiles. + """ + no_of_percentiles = 3 + result = create_percentiles(no_of_percentiles) + self.assertIsInstance(result, list) + self.assertEqual(len(result), no_of_percentiles) + + def test_data(self): + """ + Test that the plugin returns a list with the expected data values + for the percentiles. + """ + data = np.array([0.25, 0.5, 0.75]) + no_of_percentiles = 3 + result = create_percentiles(no_of_percentiles) + self.assertArrayAlmostEqual(result, data) + + def test_random(self): + """ + Test that the plugin returns a list with the expected number of + percentiles, if the random sampling option is selected. + """ + no_of_percentiles = 3 + result = create_percentiles(no_of_percentiles, sampling="random") + self.assertIsInstance(result, list) + self.assertEqual(len(result), no_of_percentiles) + + def test_unknown_sampling_option(self): + """ + Test that the plugin returns the expected error message, + if an unknown sampling option is selected. + """ + no_of_percentiles = 3 + msg = "The unknown sampling option is not yet implemented" + with self.assertRaisesRegexp(ValueError, msg): + create_percentiles(no_of_percentiles, sampling="unknown") + + +class Test_get_bounds_of_distribution(IrisTest): + + """Test the get_bounds_of_distribution plugin.""" + + def setUp(self): + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_probability_above_threshold_temperature_cube())) + + def test_basic(self): + """Test that the result is a numpy array.""" + cube = self.current_temperature_forecast_cube + cube_units = cube.coord("probability_above_threshold").units + result = get_bounds_of_distribution(cube.name(), cube_units) + self.assertIsInstance(result, np.ndarray) + + def test_check_data(self): + """ + Test that the expected results are returned for the bounds_pairing. + """ + cube = self.current_temperature_forecast_cube + cube_units = cube.coord("probability_above_threshold").units + bounds_pairing = (-40, 50) + result = ( + get_bounds_of_distribution(cube.name(), cube_units)) + self.assertArrayAlmostEqual(result, bounds_pairing) + + def test_check_unit_conversion(self): + """ + Test that the expected results are returned for the bounds_pairing, + if the units of the bounds_pairings need to be converted to match + the units of the forecast. + """ + cube = self.current_temperature_forecast_cube + cube.coord("probability_above_threshold").convert_units("fahrenheit") + cube_units = cube.coord("probability_above_threshold").units + bounds_pairing = (-40, 122) # In fahrenheit + result = ( + get_bounds_of_distribution(cube.name(), cube_units)) + self.assertArrayAlmostEqual(result, bounds_pairing) + + def test_check_exception_is_raised(self): + """ + Test that the expected results are returned for the bounds_pairing. 
+ """ + cube = self.current_temperature_forecast_cube + cube.standard_name = None + cube.long_name = "Nonsense" + cube_units = cube.coord("probability_above_threshold").units + msg = "The forecast_cube name" + with self.assertRaisesRegexp(KeyError, msg): + get_bounds_of_distribution(cube.name(), cube_units) + + +class Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest): + + """Test the insert_lower_and_upper_endpoint_to_1d_array.""" + + def test_basic(self): + """ + Basic test that the result is a numpy array with the expected contents. + """ + expected = [0, 0.2, 0.5, 0.8, 1] + percentiles = [0.2, 0.5, 0.8] + result = insert_lower_and_upper_endpoint_to_1d_array( + percentiles, 0, 1) + self.assertIsInstance(result, np.ndarray) + self.assertArrayAlmostEqual(result, expected) + + def test_another_example(self): + """ + Another basic test that the result is a numpy array with the + expected contents. + """ + expected = [-100, -40, 200, 1000, 10000] + percentiles = [-40, 200, 1000] + result = insert_lower_and_upper_endpoint_to_1d_array( + percentiles, -100, 10000) + self.assertIsInstance(result, np.ndarray) + self.assertArrayAlmostEqual(result, expected) class Test_reshape_array_to_have_probabilistic_dimension_at_the_front( @@ -408,31 +408,31 @@ def setUp(self): cube.coord("realization").rename("percentile") self.current_temperature_forecast_cube = cube - #def test_basic(self): - #""" - #Basic test that the result is a numpy array with the expected contents. - #""" - #cube = self.current_temperature_forecast_cube - #input_array = cube.data - #plen = len(cube.coord("percentile").points) - #reshaped_array = ( - #reshape_array_to_have_probabilistic_dimension_at_the_front( - #cube.data, cube, "percentile", plen)) - #self.assertIsInstance(reshaped_array, np.ndarray) - - #def test_size_of_array(self): - #""" - #Test that the result have the expected size for the - #probabilistic dimension and is generally of the expected size. - #""" - #cube = self.current_temperature_forecast_cube - #input_array = cube.data - #plen = len(cube.coord("percentile").points) - #reshaped_array = ( - #reshape_array_to_have_probabilistic_dimension_at_the_front( - #cube.data, cube, "percentile", plen)) - #self.assertEqual(reshaped_array.shape[0], plen) - #self.assertEqual(reshaped_array.shape, (3, 1, 3, 3)) + def test_basic(self): + """ + Basic test that the result is a numpy array with the expected contents. + """ + cube = self.current_temperature_forecast_cube + input_array = cube.data + plen = len(cube.coord("percentile").points) + reshaped_array = ( + reshape_array_to_have_probabilistic_dimension_at_the_front( + cube.data, cube, "percentile", plen)) + self.assertIsInstance(reshaped_array, np.ndarray) + + def test_size_of_array(self): + """ + Test that the result have the expected size for the + probabilistic dimension and is generally of the expected size. 
+ """ + cube = self.current_temperature_forecast_cube + input_array = cube.data + plen = len(cube.coord("percentile").points) + reshaped_array = ( + reshape_array_to_have_probabilistic_dimension_at_the_front( + cube.data, cube, "percentile", plen)) + self.assertEqual(reshaped_array.shape[0], plen) + self.assertEqual(reshaped_array.shape, (3, 1, 3, 3)) def test_data_check(self): """ @@ -469,33 +469,33 @@ def test_data_check(self): percentile_cube.data, percentile_cube, "percentile", plen)) self.assertArrayAlmostEqual(reshaped_array, expected) - #def test_percentile_is_not_a_dimension_coordinate(self): - #""" - #Test the array size, if the percentile coordinate is not a dimension - #coordinate on the cube. - #""" - #cube = self.current_temperature_forecast_cube - #for cube_slice in cube.slices_over("percentile"): - #break - #input_array = cube_slice.data - #plen = len(cube_slice.coord("percentile").points) - #reshaped_array = ( - #reshape_array_to_have_probabilistic_dimension_at_the_front( - #cube_slice.data, cube_slice, "percentile", plen)) - #self.assertEqual(reshaped_array.shape[0], plen) - #self.assertEqual(reshaped_array.shape, (1, 1, 3, 3)) - - #def test_missing_coordinate(self): - #""" - #Basic test that the result is a numpy array with the expected contents. - #""" - #cube = self.current_temperature_forecast_cube - #input_array = cube.data - #plen = len(cube.coord("percentile").points) - #msg = "coordinate is not available" - #with self.assertRaisesRegexp(CoordinateNotFoundError, msg): - #reshape_array_to_have_probabilistic_dimension_at_the_front( - #cube.data, cube, "nonsense", plen) + def test_percentile_is_not_a_dimension_coordinate(self): + """ + Test the array size, if the percentile coordinate is not a dimension + coordinate on the cube. + """ + cube = self.current_temperature_forecast_cube + for cube_slice in cube.slices_over("percentile"): + break + input_array = cube_slice.data + plen = len(cube_slice.coord("percentile").points) + reshaped_array = ( + reshape_array_to_have_probabilistic_dimension_at_the_front( + cube_slice.data, cube_slice, "percentile", plen)) + self.assertEqual(reshaped_array.shape[0], plen) + self.assertEqual(reshaped_array.shape, (1, 1, 3, 3)) + + def test_missing_coordinate(self): + """ + Basic test that the result is a numpy array with the expected contents. + """ + cube = self.current_temperature_forecast_cube + input_array = cube.data + plen = len(cube.coord("percentile").points) + msg = "coordinate is not available" + with self.assertRaisesRegexp(CoordinateNotFoundError, msg): + reshape_array_to_have_probabilistic_dimension_at_the_front( + cube.data, cube, "nonsense", plen) if __name__ == '__main__': diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index 100de55259..4342f6bccf 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -60,74 +60,74 @@ def setUp(self): _add_forecast_reference_time_and_forecast_period( set_up_spot_temperature_cube())) - def test_check_data(self): - """ - Test that the plugin returns an Iris.cube.Cube matching the expected - data values when a cube containing mean and variance is passed in. - The resulting data values are the percentiles, which have been - generated. 
- """ - data = np.array([[[[225.56812863, 236.81812863, 248.06812863], - [259.31812863, 270.56812863, 281.81812863], - [293.06812863, 304.31812863, 315.56812863]]], - [[[229.48333333, 240.73333333, 251.98333333], - [263.23333333, 274.48333333, 285.73333333], - [296.98333333, 308.23333333, 319.48333333]]], - [[[233.39853804, 244.64853804, 255.89853804], - [267.14853804, 278.39853804, 289.64853804], - [300.89853804, 312.14853804, 323.39853804]]]]) - - cube = self.current_temperature_forecast_cube - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - percentiles = [0.1, 0.5, 0.9] - plugin = Plugin() - result = plugin._mean_and_variance_to_percentiles( - current_forecast_predictor, current_forecast_variance, - percentiles) - self.assertIsInstance(result, Cube) - self.assertArrayAlmostEqual(result.data, data) - - def test_simple_data(self): - """ - Test that the plugin returns the expected values for the generated - percentiles when an idealised set of data values between 1 and 3 - is used to create the mean and the variance. - """ - data = np.array([[[[1, 1, 1], - [1, 1, 1], - [1, 1, 1]]], - [[[2, 2, 2], - [2, 2, 2], - [2, 2, 2]]], - [[[3, 3, 3], - [3, 3, 3], - [3, 3, 3]]]]) - - result_data = np.array([[[[0.71844843, 0.71844843, 0.71844843], - [0.71844843, 0.71844843, 0.71844843], - [0.71844843, 0.71844843, 0.71844843]]], - [[[2., 2., 2.], - [2., 2., 2.], - [2., 2., 2.]]], - [[[3.28155157, 3.28155157, 3.28155157], - [3.28155157, 3.28155157, 3.28155157], - [3.28155157, 3.28155157, 3.28155157]]]]) - - cube = self.current_temperature_forecast_cube - cube.data = data - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - percentiles = [0.1, 0.5, 0.9] - plugin = Plugin() - result = plugin._mean_and_variance_to_percentiles( - current_forecast_predictor, current_forecast_variance, - percentiles) - self.assertArrayAlmostEqual(result.data, result_data) + #def test_check_data(self): + #""" + #Test that the plugin returns an Iris.cube.Cube matching the expected + #data values when a cube containing mean and variance is passed in. + #The resulting data values are the percentiles, which have been + #generated. + #""" + #data = np.array([[[[225.56812863, 229.48333333, 233.39853804], + #[236.81812863, 240.73333333, 244.64853804], + #[248.06812863, 251.98333333, 255.89853804]]], + #[[[259.31812863, 263.23333333, 267.14853804], + #[270.56812863, 274.48333333, 278.39853804], + #[281.81812863, 285.73333333, 289.64853804]]], + #[[[293.06812863, 296.98333333, 300.89853804], + #[304.31812863, 308.23333333, 312.14853804], + #[315.56812863, 319.48333333, 323.39853804]]]]) + + #cube = self.current_temperature_forecast_cube + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #percentiles = [0.1, 0.5, 0.9] + #plugin = Plugin() + #result = plugin._mean_and_variance_to_percentiles( + #current_forecast_predictor, current_forecast_variance, + #percentiles) + #self.assertIsInstance(result, Cube) + #self.assertArrayAlmostEqual(result.data, data) + + #def test_simple_data(self): + #""" + #Test that the plugin returns the expected values for the generated + #percentiles when an idealised set of data values between 1 and 3 + #is used to create the mean and the variance. 
+ #""" + #data = np.array([[[[1, 1, 1], + #[1, 1, 1], + #[1, 1, 1]]], + #[[[2, 2, 2], + #[2, 2, 2], + #[2, 2, 2]]], + #[[[3, 3, 3], + #[3, 3, 3], + #[3, 3, 3]]]]) + + #result_data = np.array([[[[0.71844843, 0.71844843, 0.71844843], + #[0.71844843, 0.71844843, 0.71844843], + #[0.71844843, 0.71844843, 0.71844843]]], + #[[[2., 2., 2.], + #[2., 2., 2.], + #[2., 2., 2.]]], + #[[[3.28155157, 3.28155157, 3.28155157], + #[3.28155157, 3.28155157, 3.28155157], + #[3.28155157, 3.28155157, 3.28155157]]]]) + + #cube = self.current_temperature_forecast_cube + #cube.data = data + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #percentiles = [0.1, 0.5, 0.9] + #plugin = Plugin() + #result = plugin._mean_and_variance_to_percentiles( + #current_forecast_predictor, current_forecast_variance, + #percentiles) + #self.assertArrayAlmostEqual(result.data, result_data) def test_if_identical_data(self): """ @@ -144,54 +144,13 @@ def test_if_identical_data(self): data = np.repeat(data[np.newaxis, np.newaxis, :, :], 3, axis=0) result_data = np.array([[[[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]]], + [[[2., 2., 2.], [2., 2., 2.], - [3., 3., 3.]]], - [[[1., 1., 1.], - [2., 2., 2.], - [3., 3., 3.]]], - [[[1., 1., 1.], - [2., 2., 2.], - [3., 3., 3.]]]]) - - cube = self.current_temperature_forecast_cube - cube.data = data - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - percentiles = [0.1, 0.5, 0.9] - plugin = Plugin() - result = plugin._mean_and_variance_to_percentiles( - current_forecast_predictor, current_forecast_variance, - percentiles) - self.assertArrayAlmostEqual(result.data, result_data) - - def test_if_nearly_identical_data(self): - """ - Test that the plugin returns the expected values, if every - percentile has an identical value. This causes an issue because - the default for the underlying scipy function is to yield a NaN for - tied values. For this application, any NaN values are overwritten with - the predicted mean value for all probability thresholds. - """ - data = np.array([[[[1., 1., 1.], - [4., 2., 2.], - [3., 3., 3.]]], - [[[1., 1., 1.], - [2., 2., 2.], - [3., 3., 3.]]], - [[[1., 1., 1.], - [2., 2., 2.], - [3., 3., 3.]]]]) - - result_data = np.array([[[[1., 1., 1.], - [1.186858, 2., 2.], - [3., 3., 3.]]], - [[[1., 1., 1.], - [2.66666667, 2., 2.], - [3., 3., 3.]]], - [[[1., 1., 1.], - [4.14647495, 2., 2.], + [2., 2., 2.]]], + [[[3., 3., 3.], + [3., 3., 3.], [3., 3., 3.]]]]) cube = self.current_temperature_forecast_cube @@ -205,118 +164,160 @@ def test_if_nearly_identical_data(self): result = plugin._mean_and_variance_to_percentiles( current_forecast_predictor, current_forecast_variance, percentiles) + print "result.data = ", repr(result.data) self.assertArrayAlmostEqual(result.data, result_data) - def test_many_percentiles(self): - """ - Test that the plugin returns an iris.cube.Cube if many percentiles - are requested. 
- """ - cube = self.current_temperature_forecast_cube - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - percentiles = np.linspace(0.01, 0.99, num=1000, endpoint=True) - plugin = Plugin() - result = plugin._mean_and_variance_to_percentiles( - current_forecast_predictor, current_forecast_variance, percentiles) - self.assertIsInstance(result, Cube) - - def test_negative_percentiles(self): - """ - Test that the plugin returns the expected values for the - percentiles if negative probabilities are requested. - """ - cube = self.current_temperature_forecast_cube - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - percentiles = [-0.1, 0.1] - plugin = Plugin() - msg = "NaNs are present within the result for the" - with self.assertRaisesRegexp(ValueError, msg): - plugin._mean_and_variance_to_percentiles( - current_forecast_predictor, current_forecast_variance, - percentiles) - - def test_spot_forecasts_check_data(self): - """ - Test that the plugin returns an Iris.cube.Cube matching the expected - data values when a cube containing mean and variance is passed in. - The resulting data values are the percentiles, which have been - generated for a spot forecast. - """ - data = np.array([[[225.56812863, 236.81812863, 248.06812863, - 259.31812863, 270.56812863, 281.81812863, - 293.06812863, 304.31812863, 315.56812863]], - [[229.48333333, 240.73333333, 251.98333333, - 263.23333333, 274.48333333, 285.73333333, - 296.98333333, 308.23333333, 319.48333333]], - [[233.39853804, 244.64853804, 255.89853804, - 267.14853804, 278.39853804, 289.64853804, - 300.89853804, 312.14853804, 323.39853804]]]) - - cube = self.current_temperature_spot_forecast_cube - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - percentiles = [0.1, 0.5, 0.9] - plugin = Plugin() - result = plugin._mean_and_variance_to_percentiles( - current_forecast_predictor, current_forecast_variance, - percentiles) - self.assertIsInstance(result, Cube) - self.assertArrayAlmostEqual(result.data, data) - - -class Test_process(IrisTest): - - """Test the process plugin.""" - - def setUp(self): - """Set up temperature cube.""" - self.current_temperature_forecast_cube = ( - add_forecast_reference_time_and_forecast_period( - set_up_temperature_cube())) - - def test_basic(self): - """Test that the plugin returns an Iris.cube.Cube.""" - cube = self.current_temperature_forecast_cube - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - raw_forecast = cube.copy() - - predictor_and_variance = CubeList( - [current_forecast_predictor, current_forecast_variance]) - - plugin = Plugin() - result = plugin.process(predictor_and_variance, raw_forecast) - self.assertIsInstance(result, Cube) - - def test_number_of_percentiles(self): - """ - Test that the plugin returns a cube with the expected number of - percentiles. 
- """ - cube = self.current_temperature_forecast_cube - current_forecast_predictor = cube.collapsed( - "realization", iris.analysis.MEAN) - current_forecast_variance = cube.collapsed( - "realization", iris.analysis.VARIANCE) - raw_forecast = cube.copy() - - predictor_and_variance = CubeList( - [current_forecast_predictor, current_forecast_variance]) - - plugin = Plugin() - result = plugin.process(predictor_and_variance, raw_forecast) - self.assertEqual(len(raw_forecast.coord("realization").points), - len(result.coord("percentile").points)) + #def test_if_nearly_identical_data(self): + #""" + #Test that the plugin returns the expected values, if every + #percentile has an identical value. This causes an issue because + #the default for the underlying scipy function is to yield a NaN for + #tied values. For this application, any NaN values are overwritten with + #the predicted mean value for all probability thresholds. + #""" + #data = np.array([[[[1., 1., 1.], + #[4., 2., 2.], + #[3., 3., 3.]]], + #[[[1., 1., 1.], + #[2., 2., 2.], + #[3., 3., 3.]]], + #[[[1., 1., 1.], + #[2., 2., 2.], + #[3., 3., 3.]]]]) + + #result_data = np.array([[[[1., 1., 1.], + #[1.186858, 2., 2.], + #[3., 3., 3.]]], + #[[[1., 1., 1.], + #[2.66666667, 2., 2.], + #[3., 3., 3.]]], + #[[[1., 1., 1.], + #[4.14647495, 2., 2.], + #[3., 3., 3.]]]]) + + #cube = self.current_temperature_forecast_cube + #cube.data = data + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #percentiles = [0.1, 0.5, 0.9] + #plugin = Plugin() + #result = plugin._mean_and_variance_to_percentiles( + #current_forecast_predictor, current_forecast_variance, + #percentiles) + #self.assertArrayAlmostEqual(result.data, result_data) + + #def test_many_percentiles(self): + #""" + #Test that the plugin returns an iris.cube.Cube if many percentiles + #are requested. + #""" + #cube = self.current_temperature_forecast_cube + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #percentiles = np.linspace(0.01, 0.99, num=1000, endpoint=True) + #plugin = Plugin() + #result = plugin._mean_and_variance_to_percentiles( + #current_forecast_predictor, current_forecast_variance, percentiles) + #self.assertIsInstance(result, Cube) + + #def test_negative_percentiles(self): + #""" + #Test that the plugin returns the expected values for the + #percentiles if negative probabilities are requested. + #""" + #cube = self.current_temperature_forecast_cube + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #percentiles = [-0.1, 0.1] + #plugin = Plugin() + #msg = "NaNs are present within the result for the" + #with self.assertRaisesRegexp(ValueError, msg): + #plugin._mean_and_variance_to_percentiles( + #current_forecast_predictor, current_forecast_variance, + #percentiles) + + #def test_spot_forecasts_check_data(self): + #""" + #Test that the plugin returns an Iris.cube.Cube matching the expected + #data values when a cube containing mean and variance is passed in. + #The resulting data values are the percentiles, which have been + #generated for a spot forecast. 
+ #""" + #data = np.array([[[225.56812863, 236.81812863, 248.06812863, + #259.31812863, 270.56812863, 281.81812863, + #293.06812863, 304.31812863, 315.56812863]], + #[[229.48333333, 240.73333333, 251.98333333, + #263.23333333, 274.48333333, 285.73333333, + #296.98333333, 308.23333333, 319.48333333]], + #[[233.39853804, 244.64853804, 255.89853804, + #267.14853804, 278.39853804, 289.64853804, + #300.89853804, 312.14853804, 323.39853804]]]) + + #cube = self.current_temperature_spot_forecast_cube + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #percentiles = [0.1, 0.5, 0.9] + #plugin = Plugin() + #result = plugin._mean_and_variance_to_percentiles( + #current_forecast_predictor, current_forecast_variance, + #percentiles) + #self.assertIsInstance(result, Cube) + #self.assertArrayAlmostEqual(result.data, data) + + +#class Test_process(IrisTest): + + #"""Test the process plugin.""" + + #def setUp(self): + #"""Set up temperature cube.""" + #self.current_temperature_forecast_cube = ( + #_add_forecast_reference_time_and_forecast_period( + #set_up_temperature_cube())) + + #def test_basic(self): + #"""Test that the plugin returns an Iris.cube.Cube.""" + #cube = self.current_temperature_forecast_cube + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #raw_forecast = cube.copy() + + #predictor_and_variance = CubeList( + #[current_forecast_predictor, current_forecast_variance]) + + #plugin = Plugin() + #result = plugin.process(predictor_and_variance, raw_forecast) + #self.assertIsInstance(result, Cube) + + #def test_number_of_percentiles(self): + #""" + #Test that the plugin returns a cube with the expected number of + #percentiles. + #""" + #cube = self.current_temperature_forecast_cube + #current_forecast_predictor = cube.collapsed( + #"realization", iris.analysis.MEAN) + #current_forecast_variance = cube.collapsed( + #"realization", iris.analysis.VARIANCE) + #raw_forecast = cube.copy() + + #predictor_and_variance = CubeList( + #[current_forecast_predictor, current_forecast_variance]) + + #plugin = Plugin() + #result = plugin.process(predictor_and_variance, raw_forecast) + #self.assertEqual(len(raw_forecast.coord("realization").points), + #len(result.coord("percentile").points)) if __name__ == '__main__': From 59af245d173eb904d2be3abcdec556ea9a74a43b Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 23 May 2017 15:18:03 +0100 Subject: [PATCH 0118/1367] Fixes to make unit tests work by replacing the data being checked. Pep8 corrections added and print statements removed. 
--- .../ensemble_copula_coupling.py | 1 - .../ensemble_copula_coupling_utilities.py | 2 - ...oupling_EnsembleCopulaCouplingUtilities.py | 4 +- ..._GeneratePercentilesFromMeanAndVariance.py | 439 +++++++++--------- ...ng_GeneratePercentilesFromProbabilities.py | 48 +- ...ble_copula_coupling_ResamplePercentiles.py | 12 +- 6 files changed, 251 insertions(+), 255 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 3d7ed2c8b4..fe66cace5d 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -191,7 +191,6 @@ def _sample_percentiles( (forecast_at_reshaped_percentiles.shape[0], len(desired_percentiles)))) for index in range(forecast_at_reshaped_percentiles.shape[0]): - print "forecast_at_reshaped_percentiles[index, :] = ", forecast_at_reshaped_percentiles[index, :] forecast_at_interpolated_percentiles[index, :] = np.interp( desired_percentiles, original_percentiles, forecast_at_reshaped_percentiles[index, :]) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index d357f36093..3fea9ea8c2 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -292,9 +292,7 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( msg = ("A {} coordinate is not available on the {} cube.".format( input_probabilistic_dimension_name, original_cube)) raise CoordinateNotFoundError(msg) - print "array_to_reshape = ", array_to_reshape array_to_reshape = array_to_reshape.T shape_to_reshape_to = ( [output_probabilistic_dimension_length] + shape_to_reshape_to) - print "shape_to_reshape_to = ", shape_to_reshape_to return array_to_reshape.reshape(shape_to_reshape_to) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index f12f771131..dd7bacaa48 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -76,7 +76,7 @@ def test_another_example(self): expected contents. """ expected = np.array( - [[-100, -40, 200, 1000, 10000], [-100, -40, 200, 1000, 10000]]) + [[-100, -40, 200, 1000, 10000], [-100, -40, 200, 1000, 10000]]) percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) result = concatenate_2d_array_with_2d_array_endpoints( percentiles, -100, 10000) @@ -438,7 +438,7 @@ def test_data_check(self): """ Test that the data has been reshaped correctly. 
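With the debug print gone, the _sample_percentiles loop in the hunk above is plain row-wise interpolation. A toy run, assuming one row per grid point:

    import numpy as np

    original = np.array([0.1, 0.5, 0.9])
    desired = np.array([0.25, 0.75])
    forecast_rows = np.array([[4., 6., 8.],    # grid point 1
                              [5., 7., 9.]])   # grid point 2
    out = np.empty((forecast_rows.shape[0], desired.size))
    for index in range(forecast_rows.shape[0]):
        out[index, :] = np.interp(desired, original, forecast_rows[index, :])
    # out -> [[4.75, 7.25],
    #         [5.75, 8.25]]
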
""" - expected = np.array([[[[ 4., 6.], + expected = np.array([[[[4., 6.], [8., 6.85714286]], [[8.85714286, 10.85714286], [5.42857143, 7.42857143]]], diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index 4342f6bccf..effa6c5854 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -60,74 +60,74 @@ def setUp(self): _add_forecast_reference_time_and_forecast_period( set_up_spot_temperature_cube())) - #def test_check_data(self): - #""" - #Test that the plugin returns an Iris.cube.Cube matching the expected - #data values when a cube containing mean and variance is passed in. - #The resulting data values are the percentiles, which have been - #generated. - #""" - #data = np.array([[[[225.56812863, 229.48333333, 233.39853804], - #[236.81812863, 240.73333333, 244.64853804], - #[248.06812863, 251.98333333, 255.89853804]]], - #[[[259.31812863, 263.23333333, 267.14853804], - #[270.56812863, 274.48333333, 278.39853804], - #[281.81812863, 285.73333333, 289.64853804]]], - #[[[293.06812863, 296.98333333, 300.89853804], - #[304.31812863, 308.23333333, 312.14853804], - #[315.56812863, 319.48333333, 323.39853804]]]]) - - #cube = self.current_temperature_forecast_cube - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #percentiles = [0.1, 0.5, 0.9] - #plugin = Plugin() - #result = plugin._mean_and_variance_to_percentiles( - #current_forecast_predictor, current_forecast_variance, - #percentiles) - #self.assertIsInstance(result, Cube) - #self.assertArrayAlmostEqual(result.data, data) - - #def test_simple_data(self): - #""" - #Test that the plugin returns the expected values for the generated - #percentiles when an idealised set of data values between 1 and 3 - #is used to create the mean and the variance. - #""" - #data = np.array([[[[1, 1, 1], - #[1, 1, 1], - #[1, 1, 1]]], - #[[[2, 2, 2], - #[2, 2, 2], - #[2, 2, 2]]], - #[[[3, 3, 3], - #[3, 3, 3], - #[3, 3, 3]]]]) - - #result_data = np.array([[[[0.71844843, 0.71844843, 0.71844843], - #[0.71844843, 0.71844843, 0.71844843], - #[0.71844843, 0.71844843, 0.71844843]]], - #[[[2., 2., 2.], - #[2., 2., 2.], - #[2., 2., 2.]]], - #[[[3.28155157, 3.28155157, 3.28155157], - #[3.28155157, 3.28155157, 3.28155157], - #[3.28155157, 3.28155157, 3.28155157]]]]) - - #cube = self.current_temperature_forecast_cube - #cube.data = data - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #percentiles = [0.1, 0.5, 0.9] - #plugin = Plugin() - #result = plugin._mean_and_variance_to_percentiles( - #current_forecast_predictor, current_forecast_variance, - #percentiles) - #self.assertArrayAlmostEqual(result.data, result_data) + def test_check_data(self): + """ + Test that the plugin returns an Iris.cube.Cube matching the expected + data values when a cube containing mean and variance is passed in. + The resulting data values are the percentiles, which have been + generated. 
+ """ + data = np.array([[[[225.56812863, 229.48333333, 233.39853804], + [236.81812863, 240.73333333, 244.64853804], + [248.06812863, 251.98333333, 255.89853804]]], + [[[259.31812863, 263.23333333, 267.14853804], + [270.56812863, 274.48333333, 278.39853804], + [281.81812863, 285.73333333, 289.64853804]]], + [[[293.06812863, 296.98333333, 300.89853804], + [304.31812863, 308.23333333, 312.14853804], + [315.56812863, 319.48333333, 323.39853804]]]]) + + cube = self.current_temperature_forecast_cube + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + percentiles = [0.1, 0.5, 0.9] + plugin = Plugin() + result = plugin._mean_and_variance_to_percentiles( + current_forecast_predictor, current_forecast_variance, + percentiles) + self.assertIsInstance(result, Cube) + self.assertArrayAlmostEqual(result.data, data) + + def test_simple_data(self): + """ + Test that the plugin returns the expected values for the generated + percentiles when an idealised set of data values between 1 and 3 + is used to create the mean and the variance. + """ + data = np.array([[[[1, 1, 1], + [1, 1, 1], + [1, 1, 1]]], + [[[2, 2, 2], + [2, 2, 2], + [2, 2, 2]]], + [[[3, 3, 3], + [3, 3, 3], + [3, 3, 3]]]]) + + result_data = np.array([[[[0.71844843, 2., 3.28155157], + [0.71844843, 2., 3.28155157], + [0.71844843, 2., 3.28155157]]], + [[[0.71844843, 2., 3.28155157], + [0.71844843, 2., 3.28155157], + [0.71844843, 2., 3.28155157]]], + [[[0.71844843, 2., 3.28155157], + [0.71844843, 2., 3.28155157], + [0.71844843, 2., 3.28155157]]]]) + + cube = self.current_temperature_forecast_cube + cube.data = data + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + percentiles = [0.1, 0.5, 0.9] + plugin = Plugin() + result = plugin._mean_and_variance_to_percentiles( + current_forecast_predictor, current_forecast_variance, + percentiles) + self.assertArrayAlmostEqual(result.data, result_data) def test_if_identical_data(self): """ @@ -164,160 +164,159 @@ def test_if_identical_data(self): result = plugin._mean_and_variance_to_percentiles( current_forecast_predictor, current_forecast_variance, percentiles) - print "result.data = ", repr(result.data) self.assertArrayAlmostEqual(result.data, result_data) - #def test_if_nearly_identical_data(self): - #""" - #Test that the plugin returns the expected values, if every - #percentile has an identical value. This causes an issue because - #the default for the underlying scipy function is to yield a NaN for - #tied values. For this application, any NaN values are overwritten with - #the predicted mean value for all probability thresholds. 
- #""" - #data = np.array([[[[1., 1., 1.], - #[4., 2., 2.], - #[3., 3., 3.]]], - #[[[1., 1., 1.], - #[2., 2., 2.], - #[3., 3., 3.]]], - #[[[1., 1., 1.], - #[2., 2., 2.], - #[3., 3., 3.]]]]) - - #result_data = np.array([[[[1., 1., 1.], - #[1.186858, 2., 2.], - #[3., 3., 3.]]], - #[[[1., 1., 1.], - #[2.66666667, 2., 2.], - #[3., 3., 3.]]], - #[[[1., 1., 1.], - #[4.14647495, 2., 2.], - #[3., 3., 3.]]]]) - - #cube = self.current_temperature_forecast_cube - #cube.data = data - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #percentiles = [0.1, 0.5, 0.9] - #plugin = Plugin() - #result = plugin._mean_and_variance_to_percentiles( - #current_forecast_predictor, current_forecast_variance, - #percentiles) - #self.assertArrayAlmostEqual(result.data, result_data) - - #def test_many_percentiles(self): - #""" - #Test that the plugin returns an iris.cube.Cube if many percentiles - #are requested. - #""" - #cube = self.current_temperature_forecast_cube - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #percentiles = np.linspace(0.01, 0.99, num=1000, endpoint=True) - #plugin = Plugin() - #result = plugin._mean_and_variance_to_percentiles( - #current_forecast_predictor, current_forecast_variance, percentiles) - #self.assertIsInstance(result, Cube) - - #def test_negative_percentiles(self): - #""" - #Test that the plugin returns the expected values for the - #percentiles if negative probabilities are requested. - #""" - #cube = self.current_temperature_forecast_cube - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #percentiles = [-0.1, 0.1] - #plugin = Plugin() - #msg = "NaNs are present within the result for the" - #with self.assertRaisesRegexp(ValueError, msg): - #plugin._mean_and_variance_to_percentiles( - #current_forecast_predictor, current_forecast_variance, - #percentiles) - - #def test_spot_forecasts_check_data(self): - #""" - #Test that the plugin returns an Iris.cube.Cube matching the expected - #data values when a cube containing mean and variance is passed in. - #The resulting data values are the percentiles, which have been - #generated for a spot forecast. 
- #""" - #data = np.array([[[225.56812863, 236.81812863, 248.06812863, - #259.31812863, 270.56812863, 281.81812863, - #293.06812863, 304.31812863, 315.56812863]], - #[[229.48333333, 240.73333333, 251.98333333, - #263.23333333, 274.48333333, 285.73333333, - #296.98333333, 308.23333333, 319.48333333]], - #[[233.39853804, 244.64853804, 255.89853804, - #267.14853804, 278.39853804, 289.64853804, - #300.89853804, 312.14853804, 323.39853804]]]) - - #cube = self.current_temperature_spot_forecast_cube - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #percentiles = [0.1, 0.5, 0.9] - #plugin = Plugin() - #result = plugin._mean_and_variance_to_percentiles( - #current_forecast_predictor, current_forecast_variance, - #percentiles) - #self.assertIsInstance(result, Cube) - #self.assertArrayAlmostEqual(result.data, data) - - -#class Test_process(IrisTest): - - #"""Test the process plugin.""" - - #def setUp(self): - #"""Set up temperature cube.""" - #self.current_temperature_forecast_cube = ( - #_add_forecast_reference_time_and_forecast_period( - #set_up_temperature_cube())) - - #def test_basic(self): - #"""Test that the plugin returns an Iris.cube.Cube.""" - #cube = self.current_temperature_forecast_cube - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #raw_forecast = cube.copy() - - #predictor_and_variance = CubeList( - #[current_forecast_predictor, current_forecast_variance]) - - #plugin = Plugin() - #result = plugin.process(predictor_and_variance, raw_forecast) - #self.assertIsInstance(result, Cube) - - #def test_number_of_percentiles(self): - #""" - #Test that the plugin returns a cube with the expected number of - #percentiles. - #""" - #cube = self.current_temperature_forecast_cube - #current_forecast_predictor = cube.collapsed( - #"realization", iris.analysis.MEAN) - #current_forecast_variance = cube.collapsed( - #"realization", iris.analysis.VARIANCE) - #raw_forecast = cube.copy() - - #predictor_and_variance = CubeList( - #[current_forecast_predictor, current_forecast_variance]) - - #plugin = Plugin() - #result = plugin.process(predictor_and_variance, raw_forecast) - #self.assertEqual(len(raw_forecast.coord("realization").points), - #len(result.coord("percentile").points)) + def test_if_nearly_identical_data(self): + """ + Test that the plugin returns the expected values, if every + percentile has an identical value. This causes an issue because + the default for the underlying scipy function is to yield a NaN for + tied values. For this application, any NaN values are overwritten with + the predicted mean value for all probability thresholds. 
+ """ + data = np.array([[[[1., 1., 1.], + [4., 2., 2.], + [3., 3., 3.]]], + [[[1., 1., 1.], + [2., 2., 2.], + [3., 3., 3.]]], + [[[1., 1., 1.], + [2., 2., 2.], + [3., 3., 3.]]]]) + + result_data = np.array([[[[1., 1., 1.], + [1., 1., 1.], + [1., 1., 1.]]], + [[[1.18685838, 2.66666667, 4.14647495], + [2., 2., 2.], + [2., 2., 2.]]], + [[[3., 3., 3.], + [3., 3., 3.], + [3., 3., 3.]]]]) + + cube = self.current_temperature_forecast_cube + cube.data = data + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + percentiles = [0.1, 0.5, 0.9] + plugin = Plugin() + result = plugin._mean_and_variance_to_percentiles( + current_forecast_predictor, current_forecast_variance, + percentiles) + self.assertArrayAlmostEqual(result.data, result_data) + + def test_many_percentiles(self): + """ + Test that the plugin returns an iris.cube.Cube if many percentiles + are requested. + """ + cube = self.current_temperature_forecast_cube + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + percentiles = np.linspace(0.01, 0.99, num=1000, endpoint=True) + plugin = Plugin() + result = plugin._mean_and_variance_to_percentiles( + current_forecast_predictor, current_forecast_variance, percentiles) + self.assertIsInstance(result, Cube) + + def test_negative_percentiles(self): + """ + Test that the plugin returns the expected values for the + percentiles if negative probabilities are requested. + """ + cube = self.current_temperature_forecast_cube + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + percentiles = [-0.1, 0.1] + plugin = Plugin() + msg = "NaNs are present within the result for the" + with self.assertRaisesRegexp(ValueError, msg): + plugin._mean_and_variance_to_percentiles( + current_forecast_predictor, current_forecast_variance, + percentiles) + + def test_spot_forecasts_check_data(self): + """ + Test that the plugin returns an Iris.cube.Cube matching the expected + data values when a cube containing mean and variance is passed in. + The resulting data values are the percentiles, which have been + generated for a spot forecast. 
+ """ + data = np.array([[[225.56812863, 229.48333333, 233.39853804, + 236.81812863, 240.73333333, 244.64853804, + 248.06812863, 251.98333333, 255.89853804]], + [[259.31812863, 263.23333333, 267.14853804, + 270.56812863, 274.48333333, 278.39853804, + 281.81812863, 285.73333333, 289.64853804]], + [[293.06812863, 296.98333333, 300.89853804, + 304.31812863, 308.23333333, 312.14853804, + 315.56812863, 319.48333333, 323.39853804]]]) + + cube = self.current_temperature_spot_forecast_cube + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + percentiles = [0.1, 0.5, 0.9] + plugin = Plugin() + result = plugin._mean_and_variance_to_percentiles( + current_forecast_predictor, current_forecast_variance, + percentiles) + self.assertIsInstance(result, Cube) + self.assertArrayAlmostEqual(result.data, data) + + +class Test_process(IrisTest): + + """Test the process plugin.""" + + def setUp(self): + """Set up temperature cube.""" + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_temperature_cube())) + + def test_basic(self): + """Test that the plugin returns an Iris.cube.Cube.""" + cube = self.current_temperature_forecast_cube + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + raw_forecast = cube.copy() + + predictor_and_variance = CubeList( + [current_forecast_predictor, current_forecast_variance]) + + plugin = Plugin() + result = plugin.process(predictor_and_variance, raw_forecast) + self.assertIsInstance(result, Cube) + + def test_number_of_percentiles(self): + """ + Test that the plugin returns a cube with the expected number of + percentiles. + """ + cube = self.current_temperature_forecast_cube + current_forecast_predictor = cube.collapsed( + "realization", iris.analysis.MEAN) + current_forecast_variance = cube.collapsed( + "realization", iris.analysis.VARIANCE) + raw_forecast = cube.copy() + + predictor_and_variance = CubeList( + [current_forecast_predictor, current_forecast_variance]) + + plugin = Plugin() + result = plugin.process(predictor_and_variance, raw_forecast) + self.assertEqual(len(raw_forecast.coord("realization").points), + len(result.coord("percentile").points)) if __name__ == '__main__': diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 5e85fb7ddd..284b1b771c 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -269,14 +269,14 @@ def test_check_data(self): data values for the percentiles. """ data = np.array([[[[15.8, 8., 10.4], - [-16., 8., -30.4], - [-30.4, -34., -35.2]]], - [[[31., 10., 12.], - [10., 10., 8.], - [8., -10., -16.]]], - [[[46.2, 31., 42.4], - [31., 11.6, 12.], - [11., 9., 3.2]]]]) + [-16., 8., -30.4], + [-30.4, -34., -35.2]]], + [[[31., 10., 12.], + [10., 10., 8.], + [8., -10., -16.]]], + [[[46.2, 31., 42.4], + [31., 11.6, 12.], + [11., 9., 3.2]]]]) cube = self.current_temperature_forecast_cube percentiles = [0.1, 0.5, 0.9] @@ -432,14 +432,14 @@ def test_check_data_specifying_percentiles(self): data values for a specific number of percentiles. 
""" data = np.array([[[[21.5, 8.75, 11.], - [8.33333333, 8.75, -16.], - [-16., -25., -28.]]], - [[[31., 10., 12.], - [10., 10., 8.], - [ 8., -10., -16.]]], - [[[40.5, 11.66666667, 31.], - [11.66666667, 11., 10.5], - [9.66666667, 5., -4.]]]]) + [8.33333333, 8.75, -16.], + [-16., -25., -28.]]], + [[[31., 10., 12.], + [10., 10., 8.], + [8., -10., -16.]]], + [[[40.5, 11.66666667, 31.], + [11.66666667, 11., 10.5], + [9.66666667, 5., -4.]]]]) cube = self.current_temperature_forecast_cube percentiles = [0.1, 0.5, 0.9] @@ -454,14 +454,14 @@ def test_check_data_not_specifying_percentiles(self): data values without specifying the number of percentiles. """ data = np.array([[[[21.5, 8.75, 11.], - [8.33333333, 8.75, -16.], - [-16., -25., -28.]]], - [[[31., 10., 12.], - [10., 10., 8.], - [ 8., -10., -16.]]], - [[[40.5, 11.66666667, 31.], - [11.66666667, 11., 10.5], - [9.66666667, 5., -4.]]]]) + [8.33333333, 8.75, -16.], + [-16., -25., -28.]]], + [[[31., 10., 12.], + [10., 10., 8.], + [8., -10., -16.]]], + [[[40.5, 11.66666667, 31.], + [11.66666667, 11., 10.5], + [9.66666667, 5., -4.]]]]) cube = self.current_temperature_forecast_cube plugin = Plugin() diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index 1bb831f2aa..328e176e89 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -215,13 +215,13 @@ def test_check_data(self): """ data = np.array([[[[4.5, 5.125, 5.75], [6.375, 7., 7.625], - [8.25 , 8.875, 9.5]]], - [[[6.5 , 7.125, 7.75], + [8.25, 8.875, 9.5]]], + [[[6.5, 7.125, 7.75], [8.375, 9., 9.625], - [10.25 , 10.875, 11.5]]], - [[[7.5 , 8.125, 8.75], + [10.25, 10.875, 11.5]]], + [[[7.5, 8.125, 8.75], [9.375, 10., 10.625], - [11.25 , 11.875, 12.5]]]]) + [11.25, 11.875, 12.5]]]]) cube = self.percentile_cube percentiles = [0.2, 0.6, 0.8] @@ -371,7 +371,7 @@ def test_lots_of_percentiles(self): [10., 10.625, 11.25]]], [[[6.75, 7.375, 8.], [8.625, 9.25, 9.875], - [10.5, 11.125, 11.75]]], + [10.5, 11.125, 11.75]]], [[[7.25, 7.875, 8.5], [9.125, 9.75, 10.375], [11., 11.625, 12.25]]], From 2e8cf77f54fd9a6b32844a68b35f40c35cdad41e Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 23 May 2017 16:26:38 +0100 Subject: [PATCH 0119/1367] Edits to remove unnecessary transposing of the data, in order to produce correct resuls, and unit tests have been corrected. --- .../ensemble_copula_coupling.py | 2 - ...oupling_EnsembleCopulaCouplingUtilities.py | 2 +- ..._GeneratePercentilesFromMeanAndVariance.py | 80 +++++++++---------- 3 files changed, 41 insertions(+), 43 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index fe66cace5d..042359ad58 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -529,8 +529,6 @@ def _mean_and_variance_to_percentiles( "function.") raise ValueError(msg) - result = result.T - # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. 
result = ( diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index dd7bacaa48..38deb7dcf8 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -423,7 +423,7 @@ def test_basic(self): def test_size_of_array(self): """ Test that the result have the expected size for the - probabilistic dimension and is generally of the expected size. + probabilistic dimension and is generally of the expected size. """ cube = self.current_temperature_forecast_cube input_array = cube.data diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index effa6c5854..2f4582c72e 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -67,15 +67,15 @@ def test_check_data(self): The resulting data values are the percentiles, which have been generated. """ - data = np.array([[[[225.56812863, 229.48333333, 233.39853804], - [236.81812863, 240.73333333, 244.64853804], - [248.06812863, 251.98333333, 255.89853804]]], - [[[259.31812863, 263.23333333, 267.14853804], - [270.56812863, 274.48333333, 278.39853804], - [281.81812863, 285.73333333, 289.64853804]]], - [[[293.06812863, 296.98333333, 300.89853804], - [304.31812863, 308.23333333, 312.14853804], - [315.56812863, 319.48333333, 323.39853804]]]]) + data = np.array([[[[225.56812863, 236.81812863, 248.06812863], + [259.31812863, 270.56812863, 281.81812863], + [293.06812863, 304.31812863, 315.56812863]]], + [[[229.48333333, 240.73333333, 251.98333333], + [263.23333333, 274.48333333, 285.73333333], + [296.98333333, 308.23333333, 319.48333333]]], + [[[233.39853804, 244.64853804, 255.89853804], + [267.14853804, 278.39853804, 289.64853804], + [300.89853804, 312.14853804, 323.39853804]]]]) cube = self.current_temperature_forecast_cube current_forecast_predictor = cube.collapsed( @@ -106,15 +106,15 @@ def test_simple_data(self): [3, 3, 3], [3, 3, 3]]]]) - result_data = np.array([[[[0.71844843, 2., 3.28155157], - [0.71844843, 2., 3.28155157], - [0.71844843, 2., 3.28155157]]], - [[[0.71844843, 2., 3.28155157], - [0.71844843, 2., 3.28155157], - [0.71844843, 2., 3.28155157]]], - [[[0.71844843, 2., 3.28155157], - [0.71844843, 2., 3.28155157], - [0.71844843, 2., 3.28155157]]]]) + result_data = np.array([[[[0.71844843, 0.71844843, 0.71844843], + [0.71844843, 0.71844843, 0.71844843], + [0.71844843, 0.71844843, 0.71844843]]], + [[[2., 2., 2.], + [2., 2., 2.], + [2., 2., 2.]]], + [[[3.28155157, 3.28155157, 3.28155157], + [3.28155157, 3.28155157, 3.28155157], + [3.28155157, 3.28155157, 3.28155157]]]]) cube = self.current_temperature_forecast_cube cube.data = data @@ -144,13 +144,13 @@ def test_if_identical_data(self): data = np.repeat(data[np.newaxis, np.newaxis, :, :], 3, axis=0) result_data = np.array([[[[1., 1., 1.], - [1., 1., 1.], - [1., 1., 1.]]], - [[[2., 2., 2.], [2., 2., 2.], - [2., 2., 2.]]], - [[[3., 3., 3.], - [3., 3., 3.], + [3., 3., 3.]]], + [[[1., 1., 1.], + [2., 2., 2.], + [3., 3., 3.]]], + [[[1., 1., 1.], + [2., 2., 2.], [3., 3., 3.]]]]) cube = self.current_temperature_forecast_cube @@ -185,13 +185,13 @@ def 
test_if_nearly_identical_data(self): [3., 3., 3.]]]]) result_data = np.array([[[[1., 1., 1.], - [1., 1., 1.], - [1., 1., 1.]]], - [[[1.18685838, 2.66666667, 4.14647495], - [2., 2., 2.], - [2., 2., 2.]]], - [[[3., 3., 3.], - [3., 3., 3.], + [1.18685838, 2., 2.], + [3., 3., 3.]]], + [[[1., 1., 1.], + [2.66666667, 2., 2.], + [3., 3., 3.]]], + [[[1., 1., 1.], + [4.14647495, 2., 2.], [3., 3., 3.]]]]) cube = self.current_temperature_forecast_cube @@ -248,15 +248,15 @@ def test_spot_forecasts_check_data(self): The resulting data values are the percentiles, which have been generated for a spot forecast. """ - data = np.array([[[225.56812863, 229.48333333, 233.39853804, - 236.81812863, 240.73333333, 244.64853804, - 248.06812863, 251.98333333, 255.89853804]], - [[259.31812863, 263.23333333, 267.14853804, - 270.56812863, 274.48333333, 278.39853804, - 281.81812863, 285.73333333, 289.64853804]], - [[293.06812863, 296.98333333, 300.89853804, - 304.31812863, 308.23333333, 312.14853804, - 315.56812863, 319.48333333, 323.39853804]]]) + data = np.array([[[225.56812863, 236.81812863, 248.06812863, + 259.31812863, 270.56812863, 281.81812863, + 293.06812863, 304.31812863, 315.56812863]], + [[229.48333333, 240.73333333, 251.98333333, + 263.23333333, 274.48333333, 285.73333333, + 296.98333333, 308.23333333, 319.48333333]], + [[233.39853804, 244.64853804, 255.89853804, + 267.14853804, 278.39853804, 289.64853804, + 300.89853804, 312.14853804, 323.39853804]]]) cube = self.current_temperature_spot_forecast_cube current_forecast_predictor = cube.collapsed( From fb8062aa48c9787b02e6c6b2c3c316c717a27e67 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 09:40:07 +0100 Subject: [PATCH 0120/1367] Improvements to unit tests and edits to make sure transposing is working as desired. --- .../ensemble_copula_coupling.py | 19 +-- .../ensemble_copula_coupling_utilities.py | 7 +- ...oupling_EnsembleCopulaCouplingUtilities.py | 122 +++++++++++------- 3 files changed, 88 insertions(+), 60 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 042359ad58..31a06b868e 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -195,6 +195,10 @@ def _sample_percentiles( desired_percentiles, original_percentiles, forecast_at_reshaped_percentiles[index, :]) + # Transpose data + forecast_at_interpolated_percentiles = ( + forecast_at_interpolated_percentiles.T) + # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. forecast_at_percentiles_data = ( @@ -382,6 +386,9 @@ def _probabilities_to_percentiles( percentiles, probabilities_for_cdf[index, :], threshold_points) + # Transpose data + forecast_at_percentiles = forecast_at_percentiles.T + # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. forecast_at_percentiles = ( @@ -529,6 +536,9 @@ def _mean_and_variance_to_percentiles( "function.") raise ValueError(msg) + # Transpose data + result = result.T + # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. 
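For reference, `_mean_and_variance_to_percentiles` builds its values with scipy's percent point function, which inverts a normal CDF so that each requested percentile becomes a quantile of N(mean, variance). A standalone sketch of that step with invented numbers (the zero-variance point illustrates why the plugin backfills NaNs with the mean):

    import numpy as np
    from scipy.stats import norm

    mean = np.array([280.0, 281.0, 282.0])  # illustrative predictor values
    variance = np.array([4.0, 4.0, 0.0])    # zero variance makes ppf yield NaN
    percentiles = [0.1, 0.5, 0.9]

    result = np.zeros((len(percentiles), mean.size))
    for index, percentile in enumerate(percentiles):
        result[index, :] = norm.ppf(
            percentile, loc=mean, scale=np.sqrt(variance))
        # Mirror the plugin's NaN handling: fall back to the mean
        # wherever the variance is zero.
        nan_index = np.isnan(result[index, :])
        result[index, nan_index] = mean[nan_index]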
result = ( @@ -536,15 +546,6 @@ def _mean_and_variance_to_percentiles( result, calibrated_forecast_predictor, "realization", len(percentiles))) - shape_to_reshape_to = list(calibrated_forecast_predictor.shape) - if calibrated_forecast_predictor.coord_dims("realization"): - realization_coord_position = ( - calibrated_forecast_predictor.coord_dims("realization")) - shape_to_reshape_to.pop(realization_coord_position[0]) - shape_to_reshape_to = [len(percentiles)] + shape_to_reshape_to - - result = result.reshape(shape_to_reshape_to) - for template_cube in calibrated_forecast_predictor.slices_over( "realization"): template_cube.remove_coord("realization") diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 3fea9ea8c2..d315761161 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -246,8 +246,10 @@ def insert_lower_and_upper_endpoint_to_1d_array( array_1d : Numpy array 1d array of values padded with the low_endpoint and high_endpoint. """ - array_1d = np.insert(array_1d, 0, low_endpoint) - array_1d = np.append(array_1d, high_endpoint) + lower_array = np.array([low_endpoint]) + upper_array = np.array([high_endpoint]) + array_1d = np.concatenate( + (lower_array, array_1d, upper_array), axis=1) return array_1d @@ -292,7 +294,6 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( msg = ("A {} coordinate is not available on the {} cube.".format( input_probabilistic_dimension_name, original_cube)) raise CoordinateNotFoundError(msg) - array_to_reshape = array_to_reshape.T shape_to_reshape_to = ( [output_probabilistic_dimension_length] + shape_to_reshape_to) return array_to_reshape.reshape(shape_to_reshape_to) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 38deb7dcf8..a01b98b809 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -34,6 +34,7 @@ """ import unittest +from cf_units import Unit from iris.coords import DimCoord from iris.cube import Cube from iris.exceptions import CoordinateNotFoundError @@ -64,9 +65,9 @@ def test_basic(self): Basic test that the result is a numpy array with the expected contents. """ expected = np.array([[0, 0.2, 0.5, 0.8, 1]]) - percentiles = np.array([[0.2, 0.5, 0.8]]) + input_array = np.array([[0.2, 0.5, 0.8]]) result = concatenate_2d_array_with_2d_array_endpoints( - percentiles, 0, 1) + input_array, 0, 1) self.assertIsInstance(result, np.ndarray) self.assertArrayAlmostEqual(result, expected) @@ -77,9 +78,9 @@ def test_another_example(self): """ expected = np.array( [[-100, -40, 200, 1000, 10000], [-100, -40, 200, 1000, 10000]]) - percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) + input_array = np.array([[-40, 200, 1000], [-40, 200, 1000]]) result = concatenate_2d_array_with_2d_array_endpoints( - percentiles, -100, 10000) + input_array, -100, 10000) self.assertIsInstance(result, np.ndarray) self.assertArrayAlmostEqual(result, expected) @@ -88,22 +89,22 @@ def test_1d_input(self): Test that a 1d input array results in the expected error. 
""" expected = np.array([-100, -40, 200, 1000, 10000]) - percentiles = np.array([-40, 200, 1000]) + input_array = np.array([-40, 200, 1000]) msg = "all the input arrays must have same number of dimensions" with self.assertRaisesRegexp(ValueError, msg): concatenate_2d_array_with_2d_array_endpoints( - percentiles, -100, 10000) + input_array, -100, 10000) def test_3d_input(self): """ Test that a 3d input array results in the expected error. """ expected = np.array([[[-100, -40, 200, 1000, 10000]]]) - percentiles = np.array([[[-40, 200, 1000]]]) + input_array = np.array([[[-40, 200, 1000]]]) msg = "all the input arrays must have same number of dimensions" with self.assertRaisesRegexp(ValueError, msg): concatenate_2d_array_with_2d_array_endpoints( - percentiles, -100, 10000) + input_array, -100, 10000) class Test_create_cube_with_percentiles(IrisTest): @@ -322,20 +323,20 @@ def setUp(self): def test_basic(self): """Test that the result is a numpy array.""" - cube = self.current_temperature_forecast_cube - cube_units = cube.coord("probability_above_threshold").units - result = get_bounds_of_distribution(cube.name(), cube_units) + cube_name = "air_temperature" + cube_units = Unit("degreesC") + result = get_bounds_of_distribution(cube_name, cube_units) self.assertIsInstance(result, np.ndarray) def test_check_data(self): """ Test that the expected results are returned for the bounds_pairing. """ - cube = self.current_temperature_forecast_cube - cube_units = cube.coord("probability_above_threshold").units + cube_name = "air_temperature" + cube_units = Unit("degreesC") bounds_pairing = (-40, 50) result = ( - get_bounds_of_distribution(cube.name(), cube_units)) + get_bounds_of_distribution(cube_name, cube_units)) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_unit_conversion(self): @@ -344,25 +345,22 @@ def test_check_unit_conversion(self): if the units of the bounds_pairings need to be converted to match the units of the forecast. """ - cube = self.current_temperature_forecast_cube - cube.coord("probability_above_threshold").convert_units("fahrenheit") - cube_units = cube.coord("probability_above_threshold").units + cube_name = "air_temperature" + cube_units = Unit("fahrenheit") bounds_pairing = (-40, 122) # In fahrenheit result = ( - get_bounds_of_distribution(cube.name(), cube_units)) + get_bounds_of_distribution(cube_name, cube_units)) self.assertArrayAlmostEqual(result, bounds_pairing) def test_check_exception_is_raised(self): """ Test that the expected results are returned for the bounds_pairing. """ - cube = self.current_temperature_forecast_cube - cube.standard_name = None - cube.long_name = "Nonsense" - cube_units = cube.coord("probability_above_threshold").units + cube_name = "nonsense" + cube_units = Unit("degreesC") msg = "The forecast_cube name" with self.assertRaisesRegexp(KeyError, msg): - get_bounds_of_distribution(cube.name(), cube_units) + get_bounds_of_distribution(cube_name, cube_units) class Test_insert_lower_and_upper_endpoint_to_1d_array(IrisTest): @@ -392,6 +390,19 @@ def test_another_example(self): self.assertIsInstance(result, np.ndarray) self.assertArrayAlmostEqual(result, expected) + def test_2d_example(self): + """ + Another basic test that the result is a numpy array with the + expected contents. 
+ """ + expected = np.array([[-100, -40, 200, 1000, 10000], + [-100, -40, 200, 1000, 10000]]) + percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) + msg = "all the input arrays must have same number of dimensions" + with self.assertRaisesRegexp(ValueError, msg): + insert_lower_and_upper_endpoint_to_1d_array( + percentiles, -100, 10000) + class Test_reshape_array_to_have_probabilistic_dimension_at_the_front( IrisTest): @@ -420,7 +431,7 @@ def test_basic(self): cube.data, cube, "percentile", plen)) self.assertIsInstance(reshaped_array, np.ndarray) - def test_size_of_array(self): + def test_percentile_is_dimension_coordinate(self): """ Test that the result have the expected size for the probabilistic dimension and is generally of the expected size. @@ -432,24 +443,39 @@ def test_size_of_array(self): reshape_array_to_have_probabilistic_dimension_at_the_front( cube.data, cube, "percentile", plen)) self.assertEqual(reshaped_array.shape[0], plen) - self.assertEqual(reshaped_array.shape, (3, 1, 3, 3)) + self.assertEqual(reshaped_array.shape, cube.data.shape) + self.assertArrayAlmostEqual(reshaped_array, cube.data) + + def test_percentile_is_not_dimension_coordinate(self): + """ + Test the array size, if the percentile coordinate is not a dimension + coordinate on the cube. + """ + expected = np.array([[[[226.15, 237.4, 248.65], + [259.9, 271.15, 282.4], + [293.65, 304.9, 316.15]]]]) - def test_data_check(self): + cube = self.current_temperature_forecast_cube + for cube_slice in cube.slices_over("percentile"): + break + input_array = cube_slice.data + plen = len(cube_slice.coord("percentile").points) + reshaped_array = ( + reshape_array_to_have_probabilistic_dimension_at_the_front( + cube_slice.data, cube_slice, "percentile", plen)) + self.assertEqual(reshaped_array.shape[0], plen) + self.assertEqual(reshaped_array.shape, (1, 1, 3, 3)) + self.assertArrayAlmostEqual(reshaped_array, expected) + + def test_percentile_is_dimension_coordinate_multiple_timesteps(self): """ - Test that the data has been reshaped correctly. + Test that the data has been reshaped correctly when multiple timesteps + are in the cube. """ - expected = np.array([[[[4., 6.], - [8., 6.85714286]], - [[8.85714286, 10.85714286], - [5.42857143, 7.42857143]]], - [[[9.42857143, 8.28571429], - [10.28571429, 12.28571429]], - [[4.71428571, 6.71428571], - [8.71428571, 7.57142857]]], - [[[9.57142857, 11.57142857], - [6.14285714, 8.14285714]], - [[10.14285714, 9.], - [11., 13.]]]]) + expected = np.array([[[[4., 4.71428571], + [5.42857143, 6.14285714]], + [[6.85714286, 7.57142857], + [8.28571429, 9.]]]]) data = np.tile(np.linspace(5, 10, 8), 3).reshape(3, 2, 2, 2) data[0] -= 1 @@ -460,30 +486,30 @@ def test_data_check(self): y_dimension_length=2) cube.coord("realization").rename("percentile") cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) - plen = len(cube.coord("percentile").points) + plen = 1 percentile_cube = ( _add_forecast_reference_time_and_forecast_period( cube, time_point=np.array([402295.0, 402296.0]))) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( - percentile_cube.data, percentile_cube, "percentile", plen)) + percentile_cube[0].data, percentile_cube, "percentile", plen)) self.assertArrayAlmostEqual(reshaped_array, expected) - def test_percentile_is_not_a_dimension_coordinate(self): + def test_percentile_is_dimension_coordinate_flattened_data(self): """ Test the array size, if the percentile coordinate is not a dimension coordinate on the cube. 
""" cube = self.current_temperature_forecast_cube - for cube_slice in cube.slices_over("percentile"): - break - input_array = cube_slice.data - plen = len(cube_slice.coord("percentile").points) + flattened_data = cube.data.flatten() + input_array = cube.data + plen = len(cube.coord("percentile").points) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( - cube_slice.data, cube_slice, "percentile", plen)) + flattened_data, cube, "percentile", plen)) self.assertEqual(reshaped_array.shape[0], plen) - self.assertEqual(reshaped_array.shape, (1, 1, 3, 3)) + self.assertEqual(reshaped_array.shape, (3, 1, 3, 3)) + self.assertArrayAlmostEqual(reshaped_array, cube.data) def test_missing_coordinate(self): """ From 546880e0e1b9d9eb790676bff16a98123b6d0851 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 11:12:23 +0100 Subject: [PATCH 0121/1367] Edits to improve transposing and other unit test improvements. --- .../ensemble_copula_coupling.py | 38 +++++++------------ ...ng_GeneratePercentilesFromProbabilities.py | 26 +++++++++++++ ...ble_copula_coupling_ResamplePercentiles.py | 30 +++++++++++++-- 3 files changed, 66 insertions(+), 28 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 31a06b868e..8fbd4c06d7 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -188,17 +188,13 @@ def _sample_percentiles( forecast_at_interpolated_percentiles = ( np.empty( - (forecast_at_reshaped_percentiles.shape[0], - len(desired_percentiles)))) + (len(desired_percentiles), + forecast_at_reshaped_percentiles.shape[0]))) for index in range(forecast_at_reshaped_percentiles.shape[0]): - forecast_at_interpolated_percentiles[index, :] = np.interp( + forecast_at_interpolated_percentiles[:, index] = np.interp( desired_percentiles, original_percentiles, forecast_at_reshaped_percentiles[index, :]) - # Transpose data - forecast_at_interpolated_percentiles = ( - forecast_at_interpolated_percentiles.T) - # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. 
forecast_at_percentiles_data = ( @@ -367,6 +363,10 @@ def _probabilities_to_percentiles( # Invert probabilities probabilities_for_cdf = 1 - prob_slices + threshold_points, probabilities_for_cdf = ( + self._add_bounds_to_thresholds_and_probabilities( + threshold_points, probabilities_for_cdf, bounds_pairing)) + if np.any(np.diff(probabilities_for_cdf) < 0): msg = ("The probability values used to construct the " "Cumulative Distribution Function (CDF) " @@ -375,20 +375,13 @@ def _probabilities_to_percentiles( "The probabilities are {}".format(probabilities_for_cdf)) raise ValueError(msg) - threshold_points, probabilities_for_cdf = ( - self._add_bounds_to_thresholds_and_probabilities( - threshold_points, probabilities_for_cdf, bounds_pairing)) - forecast_at_percentiles = ( - np.empty((probabilities_for_cdf.shape[0], len(percentiles)))) + np.empty((len(percentiles), probabilities_for_cdf.shape[0]))) for index in range(probabilities_for_cdf.shape[0]): - forecast_at_percentiles[index, :] = np.interp( + forecast_at_percentiles[:, index] = np.interp( percentiles, probabilities_for_cdf[index, :], threshold_points) - # Transpose data - forecast_at_percentiles = forecast_at_percentiles.T - # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. forecast_at_percentiles = ( @@ -511,15 +504,15 @@ def _mean_and_variance_to_percentiles( calibrated_forecast_variance_data = ( calibrated_forecast_variance.data.flatten()) - result = np.zeros((calibrated_forecast_predictor_data.shape[0], - len(percentiles))) + result = np.zeros((len(percentiles), + calibrated_forecast_predictor_data.shape[0])) # Loop over percentiles, and use a normal distribution with the mean # and variance to calculate the values at each percentile. for index, percentile in enumerate(percentiles): percentile_list = np.repeat( percentile, len(calibrated_forecast_predictor_data)) - result[:, index] = norm.ppf( + result[index, :] = norm.ppf( percentile_list, loc=calibrated_forecast_predictor_data, scale=np.sqrt(calibrated_forecast_variance_data)) # If percent point function (PPF) returns NaNs, fill in @@ -527,8 +520,8 @@ def _mean_and_variance_to_percentiles( # variance is zero. Therefore, if the variance is zero, the mean # value is used for all gridpoints with a NaN. if np.any(calibrated_forecast_variance_data == 0): - nan_index = np.argwhere(np.isnan(result[:, index])) - result[nan_index, index] = ( + nan_index = np.argwhere(np.isnan(result[index, :])) + result[index, nan_index] = ( calibrated_forecast_predictor_data[nan_index]) if np.any(np.isnan(result)): msg = ("NaNs are present within the result for the {} " @@ -536,9 +529,6 @@ def _mean_and_variance_to_percentiles( "function.") raise ValueError(msg) - # Transpose data - result = result.T - # Reshape forecast_at_percentiles, so the percentiles dimension is # first, and any other dimension coordinates follow. 
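Reordering `_probabilities_to_percentiles` so that the bounds are appended before the monotonicity check means the padded endpoints are validated together with the data. The shape of the whole calculation, sketched for a single point with invented thresholds and the (-40, 50) bounds pairing that appears in the tests:

    import numpy as np

    threshold_points = np.array([8.0, 10.0, 12.0])
    prob_above_threshold = np.array([0.9, 0.5, 0.1])  # hypothetical

    # Invert exceedance probabilities to a CDF and pad with the bounds.
    probabilities_for_cdf = 1 - prob_above_threshold
    threshold_points = np.concatenate(([-40.0], threshold_points, [50.0]))
    probabilities_for_cdf = np.concatenate(
        ([0.0], probabilities_for_cdf, [1.0]))

    if np.any(np.diff(probabilities_for_cdf) < 0):
        raise ValueError(
            "The probability values used to construct the CDF must be "
            "monotonically increasing.")

    percentiles = [0.1, 0.5, 0.9]
    forecast_at_percentiles = np.interp(
        percentiles, probabilities_for_cdf, threshold_points)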
result = ( diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 284b1b771c..7b2af8579b 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -153,6 +153,32 @@ def test_basic(self): cube, percentiles, bounds_pairing) self.assertIsInstance(result, Cube) + def test_transpose_cube_dimensions(self): + """ + Test that the plugin returns the expected data when comparing + input cubes whose dimensions are in a different order. + """ + # Calculate result for nontransposed cube. + cube = self.current_temperature_forecast_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + nontransposed_result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + + # Calculate result for transposed cube. + # Original cube dimensions are [P, T, Y, X]. + # Transposed cube dimensions are [X, Y, T, P]. + cube.transpose([3, 2, 1, 0]) + transposed_result = plugin._probabilities_to_percentiles( + cube, percentiles, bounds_pairing) + + # Result cube will be [P, X, Y, T] + # Transpose cube to be [P, T, Y, X] + transposed_result.transpose([0, 3, 2, 1]) + self.assertArrayAlmostEqual( + nontransposed_result.data, transposed_result.data) + def test_simple_check_data(self): """ Test that the plugin returns an Iris.cube.Cube with the expected diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index 328e176e89..dc93943c70 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -182,6 +182,32 @@ def test_basic(self): cube, percentiles, bounds_pairing) self.assertIsInstance(result, Cube) + def test_transpose_cube_dimensions(self): + """ + Test that the plugin returns the expected data when comparing + input cubes whose dimensions are in a different order. + """ + # Calculate result for nontransposed cube. + cube = self.percentile_cube + percentiles = [0.1, 0.5, 0.9] + bounds_pairing = (-40, 50) + plugin = Plugin() + nontransposed_result = plugin._sample_percentiles( + cube, percentiles, bounds_pairing) + + # Calculate result for transposed cube. + # Original cube dimensions are [P, T, Y, X]. + # Transposed cube dimensions are [X, Y, T, P].
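+ # (iris follows numpy transpose semantics: the argument lists the
+ # new order of the existing axes, so [3, 2, 1, 0] reverses them.)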
+ cube.transpose([3, 2, 1, 0]) + transposed_result = plugin._sample_percentiles( + cube, percentiles, bounds_pairing) + + # Result cube will be [P, X, Y, T] + # Transpose cube to be [P, T, Y, X] + transposed_result.transpose([0, 3, 2, 1]) + self.assertArrayAlmostEqual( + nontransposed_result.data, transposed_result.data) + def test_simple_check_data(self): """ Test that the plugin returns an Iris.cube.Cube with the expected @@ -253,7 +279,6 @@ def test_check_data_multiple_timesteps(self): data[0] -= 1 data[1] += 1 data[2] += 3 - print "data = ", data cube = set_up_cube(data, "air_temperature", "degreesC", timesteps=2, x_dimension_length=2, y_dimension_length=2) @@ -266,10 +291,8 @@ def test_check_data_multiple_timesteps(self): percentiles = [0.2, 0.6, 0.8] bounds_pairing = (-40, 50) plugin = Plugin() - print "cube.data = ", cube.data result = plugin._sample_percentiles( cube, percentiles, bounds_pairing) - print "result.data = ", repr(result.data) self.assertArrayAlmostEqual(result.data, expected) def test_check_single_threshold(self): @@ -441,7 +464,6 @@ def test_check_data_specifying_percentiles(self): percentiles = [0.25, 0.5, 0.75] plugin = Plugin() result = plugin.process(cube, no_of_percentiles=len(percentiles)) - print "result.data = ", repr(result.data) self.assertArrayAlmostEqual(result.data, data) def test_check_data_not_specifying_percentiles(self): From c31d055a44adccc8f47c25080610765371c55508 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 11:21:04 +0100 Subject: [PATCH 0122/1367] Remove axis keyword from concatenation. --- .../ensemble_copula_coupling_utilities.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index d315761161..8e8d1b8038 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -248,8 +248,7 @@ def insert_lower_and_upper_endpoint_to_1d_array( """ lower_array = np.array([low_endpoint]) upper_array = np.array([high_endpoint]) - array_1d = np.concatenate( - (lower_array, array_1d, upper_array), axis=1) + array_1d = np.concatenate((lower_array, array_1d, upper_array)) return array_1d From 36fc0c6c61ebc968591bba36a7471184ac2e5521 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 11:59:03 +0100 Subject: [PATCH 0123/1367] Some of the codacy fixes. 
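Several of these warnings appear to flag mutable default arguments: a default such as forecast_thresholds=[8, 10, 12] is built once at function definition time and shared across calls, so any in-place change leaks into later calls. A toy illustration of the pitfall (names invented):

    def append_item(item, bucket=[]):  # one shared list for every call
        bucket.append(item)
        return bucket

    append_item(1)  # [1]
    append_item(2)  # [1, 2] rather than [2]

Replacing the list literals with arrays, as the diff below does for the helper-function defaults, or with None sentinels, is the usual way to quieten the checker.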
--- .../ensemble_copula_coupling/ensemble_copula_coupling.py | 3 +-- .../tests/helper_functions_ensemble_calibration.py | 6 +++--- ...ble_copula_coupling_EnsembleCopulaCouplingUtilities.py | 8 -------- ...opula_coupling_GeneratePercentilesFromProbabilities.py | 6 +----- .../test_ensemble_copula_coupling_ResamplePercentiles.py | 8 ++------ 5 files changed, 7 insertions(+), 24 deletions(-) diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 8fbd4c06d7..74f1176200 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -38,12 +38,11 @@ import iris from iris.exceptions import CoordinateNotFoundError -import cf_units as unit from improver.ensemble_calibration.ensemble_calibration_utilities import ( concatenate_cubes, convert_cube_data_to_2d, rename_coordinate) from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf, units_of_bounds_for_ecdf + import bounds_for_ecdf from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ import (concatenate_2d_array_with_2d_array_endpoints, create_cube_with_percentiles, create_percentiles, diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 196098bf64..8562fd9353 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -45,7 +45,7 @@ def set_up_probability_above_threshold_cube( data, phenomenon_standard_name, phenomenon_units, - forecast_thresholds=[8, 10, 12], timesteps=1, + forecast_thresholds=np.array([8, 10, 12]), timesteps=1, y_dimension_length=3, x_dimension_length=3): """ Create a cube containing multiple probability_above_threshold @@ -87,7 +87,7 @@ def set_up_probability_above_threshold_temperature_cube(): def set_up_probability_above_threshold_spot_cube( data, phenomenon_standard_name, phenomenon_units, - forecast_thresholds=[8, 10, 12], + forecast_thresholds=np.array([8, 10, 12]), y_dimension_length=9, x_dimension_length=9): """ Create a cube containing multiple realizations, where one of the @@ -132,7 +132,7 @@ def set_up_probability_above_threshold_spot_temperature_cube(): def set_up_cube(data, phenomenon_standard_name, phenomenon_units, - realizations=[0, 1, 2], timesteps=1, + realizations=np.array([0, 1, 2]), timesteps=1, y_dimension_length=3, x_dimension_length=3): """Create a cube containing multiple realizations.""" cube = Cube(data, standard_name=phenomenon_standard_name, diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index a01b98b809..ddfad902b6 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -47,8 +47,6 @@ concatenate_2d_array_with_2d_array_endpoints, get_bounds_of_distribution, reshape_array_to_have_probabilistic_dimension_at_the_front) -from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf from improver.tests.helper_functions_ensemble_calibration import ( set_up_cube, set_up_temperature_cube, set_up_spot_temperature_cube, @@ -88,7 +86,6 @@ def test_1d_input(self): """ Test that a 1d input 
array results in the expected error. """ - expected = np.array([-100, -40, 200, 1000, 10000]) input_array = np.array([-40, 200, 1000]) msg = "all the input arrays must have same number of dimensions" with self.assertRaisesRegexp(ValueError, msg): @@ -99,7 +96,6 @@ def test_3d_input(self): """ Test that a 3d input array results in the expected error. """ - expected = np.array([[[-100, -40, 200, 1000, 10000]]]) input_array = np.array([[[-40, 200, 1000]]]) msg = "all the input arrays must have same number of dimensions" with self.assertRaisesRegexp(ValueError, msg): @@ -395,8 +391,6 @@ def test_2d_example(self): Another basic test that the result is a numpy array with the expected contents. """ - expected = np.array([[-100, -40, 200, 1000, 10000], - [-100, -40, 200, 1000, 10000]]) percentiles = np.array([[-40, 200, 1000], [-40, 200, 1000]]) msg = "all the input arrays must have same number of dimensions" with self.assertRaisesRegexp(ValueError, msg): @@ -437,7 +431,6 @@ def test_percentile_is_dimension_coordinate(self): probabilistic dimension and is generally of the expected size. """ cube = self.current_temperature_forecast_cube - input_array = cube.data plen = len(cube.coord("percentile").points) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( @@ -458,7 +451,6 @@ def test_percentile_is_not_dimension_coordinate(self): cube = self.current_temperature_forecast_cube for cube_slice in cube.slices_over("percentile"): break - input_array = cube_slice.data plen = len(cube_slice.coord("percentile").points) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 7b2af8579b..fe48f26254 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -36,20 +36,16 @@ import numpy as np import unittest -from cf_units import Unit -from iris.coords import AuxCoord, DimCoord +from iris.coords import AuxCoord from iris.cube import Cube from iris.tests import IrisTest from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( GeneratePercentilesFromProbabilities as Plugin) -from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf from improver.tests.helper_functions_ensemble_calibration import( _add_forecast_reference_time_and_forecast_period, set_up_probability_above_threshold_cube, set_up_probability_above_threshold_temperature_cube, - set_up_probability_above_threshold_spot_cube, set_up_probability_above_threshold_spot_temperature_cube) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index dc93943c70..fa6a3705cf 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -34,19 +34,15 @@ import numpy as np import unittest -from cf_units import Unit -from iris.coords import AuxCoord, DimCoord +from iris.coords import AuxCoord from iris.cube import Cube from iris.tests import IrisTest from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( ResamplePercentiles as Plugin) -from 
improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf from improver.tests.helper_functions_ensemble_calibration import( _add_forecast_reference_time_and_forecast_period, - set_up_cube, set_up_temperature_cube, set_up_spot_cube, - set_up_spot_temperature_cube) + set_up_cube, set_up_spot_cube, set_up_spot_temperature_cube) class Test__add_bounds_to_percentiles_and_forecast_values(IrisTest): From d7dbd259b8607c721c2ac86eeaef64f84c85a746 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 12:09:36 +0100 Subject: [PATCH 0124/1367] Some more Codacy fixes --- ...ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py | 3 --- ...ble_copula_coupling_GeneratePercentilesFromProbabilities.py | 1 - .../tests/test_ensemble_copula_coupling_ResamplePercentiles.py | 3 +-- 3 files changed, 1 insertion(+), 6 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index ddfad902b6..0e26e03704 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -418,7 +418,6 @@ def test_basic(self): Basic test that the result is a numpy array with the expected contents. """ cube = self.current_temperature_forecast_cube - input_array = cube.data plen = len(cube.coord("percentile").points) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( @@ -494,7 +493,6 @@ def test_percentile_is_dimension_coordinate_flattened_data(self): """ cube = self.current_temperature_forecast_cube flattened_data = cube.data.flatten() - input_array = cube.data plen = len(cube.coord("percentile").points) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( @@ -508,7 +506,6 @@ def test_missing_coordinate(self): Basic test that the result is a numpy array with the expected contents. 
""" cube = self.current_temperature_forecast_cube - input_array = cube.data plen = len(cube.coord("percentile").points) msg = "coordinate is not available" with self.assertRaisesRegexp(CoordinateNotFoundError, msg): diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index fe48f26254..9a782e7aab 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -36,7 +36,6 @@ import numpy as np import unittest -from iris.coords import AuxCoord from iris.cube import Cube from iris.tests import IrisTest diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index fa6a3705cf..f7ffded714 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -34,7 +34,6 @@ import numpy as np import unittest -from iris.coords import AuxCoord from iris.cube import Cube from iris.tests import IrisTest @@ -42,7 +41,7 @@ ResamplePercentiles as Plugin) from improver.tests.helper_functions_ensemble_calibration import( _add_forecast_reference_time_and_forecast_period, - set_up_cube, set_up_spot_cube, set_up_spot_temperature_cube) + set_up_cube, set_up_spot_cube) class Test__add_bounds_to_percentiles_and_forecast_values(IrisTest): From de54adfe38cd5453f6699dbf0e1243ee64616153 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 12:20:10 +0100 Subject: [PATCH 0125/1367] Codacy edit. --- .../tests/test_ensemble_copula_coupling_ResamplePercentiles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index f7ffded714..4473883172 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -41,7 +41,7 @@ ResamplePercentiles as Plugin) from improver.tests.helper_functions_ensemble_calibration import( _add_forecast_reference_time_and_forecast_period, - set_up_cube, set_up_spot_cube) + set_up_cube, set_up_spot_temperature_cube) class Test__add_bounds_to_percentiles_and_forecast_values(IrisTest): From 5c45d58335470b95262ad9d42b3ebd24eb4b705c Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 15:34:33 +0100 Subject: [PATCH 0126/1367] More Codacy fixes. --- .../ensemble_calibration.py | 1 - .../ensemble_copula_coupling.py | 22 ++++++++++++------- ...mble_copula_coupling_EnsembleReordering.py | 18 +++++++-------- 3 files changed, 23 insertions(+), 18 deletions(-) diff --git a/lib/improver/ensemble_calibration/ensemble_calibration.py b/lib/improver/ensemble_calibration/ensemble_calibration.py index 135f7f8ab5..bb9ac5a096 100644 --- a/lib/improver/ensemble_calibration/ensemble_calibration.py +++ b/lib/improver/ensemble_calibration/ensemble_calibration.py @@ -32,7 +32,6 @@ This module defines all the "plugins" specific for ensemble calibration. 
""" -import copy import numpy as np from scipy import stats from scipy.optimize import minimize diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 74f1176200..bc54ac86b9 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -64,7 +64,8 @@ def __init__(self): """ pass - def process(self, cube): + @staticmethod + def process(cube): """ Rebadge percentiles as ensemble members. The ensemble member numbering will depend upon the number of percentiles in the input cube i.e. @@ -105,8 +106,9 @@ def __init__(self): """ pass + @staticmethod def _add_bounds_to_percentiles_and_forecast_at_percentiles( - self, percentiles, forecast_at_percentiles, bounds_pairing): + percentiles, forecast_at_percentiles, bounds_pairing): """ Padding of the lower and upper bounds of the percentiles for a given phenomenon, and padding of forecast values using the @@ -285,8 +287,9 @@ def __init__(self): """ pass + @staticmethod def _add_bounds_to_thresholds_and_probabilities( - self, threshold_points, probabilities_for_cdf, bounds_pairing): + threshold_points, probabilities_for_cdf, bounds_pairing): """ Padding of the lower and upper bounds of the distribution for a given phenomenon for the threshold_points, and padding of @@ -466,8 +469,9 @@ def __init__(self): """ pass + @staticmethod def _mean_and_variance_to_percentiles( - self, calibrated_forecast_predictor, calibrated_forecast_variance, + calibrated_forecast_predictor, calibrated_forecast_variance, percentiles): """ Function returning percentiles based on the supplied @@ -606,8 +610,9 @@ def __init__(self): """Initialise the class""" pass - def mismatch_between_length_of_raw_members_and_percentiles( - self, post_processed_forecast_percentiles, raw_forecast_members): + @staticmethod + def _mismatch_between_length_of_raw_members_and_percentiles( + post_processed_forecast_percentiles, raw_forecast_members): """ Function to determine whether there is a mismatch between the number of percentiles and the number of raw forecast members. If more @@ -658,8 +663,9 @@ def mismatch_between_length_of_raw_members_and_percentiles( concatenate_cubes(raw_forecast_members_extended)) return raw_forecast_members + @staticmethod def rank_ecc( - self, post_processed_forecast_percentiles, raw_forecast_members, + post_processed_forecast_percentiles, raw_forecast_members, random_ordering=False): """ Function to apply Ensemble Copula Coupling. 
This ranks the @@ -743,7 +749,7 @@ def process( coords_to_slice_over=["percentile", "time"]) raw_forecast_members = concatenate_cubes(raw_forecast) raw_forecast_members = ( - self.mismatch_between_length_of_raw_members_and_percentiles( + self._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members)) post_processed_forecast_members = self.rank_ecc( post_processed_forecast_percentiles, raw_forecast_members, diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 0407eeeb6e..f257a4f937 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -47,10 +47,10 @@ _add_forecast_reference_time_and_forecast_period) -class Test_mismatch_between_length_of_raw_members_and_percentiles(IrisTest): +class Test__mismatch_between_length_of_raw_members_and_percentiles(IrisTest): """ - Test the mismatch_between_length_of_raw_members_and_percentiles + Test the _mismatch_between_length_of_raw_members_and_percentiles method in the EnsembleReordering plugin. """ @@ -82,7 +82,7 @@ def test_realization_for_equal(self): post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube plugin = Plugin() - result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + result = plugin._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -100,7 +100,7 @@ def test_realization_for_greater_than(self): raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] plugin = Plugin() - result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + result = plugin._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -119,7 +119,7 @@ def test_realization_for_less_than(self): post_processed_forecast_percentiles = ( post_processed_forecast_percentiles[:2, :, :, :]) plugin = Plugin() - result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + result = plugin._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -146,7 +146,7 @@ def test_realization_for_equal_check_data(self): post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube plugin = Plugin() - result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + result = plugin._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -172,7 +172,7 @@ def test_realization_for_greater_than_check_data(self): # members than percentiles. 
raw_forecast_members = raw_forecast_members[:2, :, :, :] plugin = Plugin() - result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + result = plugin._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -194,7 +194,7 @@ def test_realization_for_less_than_check_data(self): post_processed_forecast_percentiles = ( post_processed_forecast_percentiles[:2, :, :, :]) plugin = Plugin() - result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + result = plugin._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -250,7 +250,7 @@ def test_realization_for_greater_than_check_data_lots_of_members(self): raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] plugin = Plugin() - result = plugin.mismatch_between_length_of_raw_members_and_percentiles( + result = plugin._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(expected, result.data) From a3e2256e0d5eaef1a000f51b7fcceae785676b70 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 24 May 2017 15:37:58 +0100 Subject: [PATCH 0127/1367] Fix pep8 line length issue. --- ...mble_copula_coupling_EnsembleReordering.py | 28 +++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index f257a4f937..4719787011 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -81,8 +81,8 @@ def test_realization_for_equal(self): data = [0, 1, 2] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube - plugin = Plugin() - result = plugin._mismatch_between_length_of_raw_members_and_percentiles( + plu = Plugin() + result = plu._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -99,8 +99,8 @@ def test_realization_for_greater_than(self): post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] - plugin = Plugin() - result = plugin._mismatch_between_length_of_raw_members_and_percentiles( + plu = Plugin() + result = plu._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -118,8 +118,8 @@ def test_realization_for_less_than(self): raw_forecast_members = self.realization_cube post_processed_forecast_percentiles = ( post_processed_forecast_percentiles[:2, :, :, :]) - plugin = Plugin() - result = plugin._mismatch_between_length_of_raw_members_and_percentiles( + plu = Plugin() + result = plu._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -145,8 +145,8 @@ def test_realization_for_equal_check_data(self): post_processed_forecast_percentiles = self.percentile_cube 
raw_forecast_members = self.realization_cube - plugin = Plugin() - result = plugin._mismatch_between_length_of_raw_members_and_percentiles( + plu = Plugin() + result = plu._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -171,8 +171,8 @@ def test_realization_for_greater_than_check_data(self): # Slice number of raw forecast members, so that there are fewer # members than percentiles. raw_forecast_members = raw_forecast_members[:2, :, :, :] - plugin = Plugin() - result = plugin._mismatch_between_length_of_raw_members_and_percentiles( + plu = Plugin() + result = plu._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -193,8 +193,8 @@ def test_realization_for_less_than_check_data(self): raw_forecast_members = self.realization_cube post_processed_forecast_percentiles = ( post_processed_forecast_percentiles[:2, :, :, :]) - plugin = Plugin() - result = plugin._mismatch_between_length_of_raw_members_and_percentiles( + plu = Plugin() + result = plu._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -249,8 +249,8 @@ def test_realization_for_greater_than_check_data_lots_of_members(self): post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] - plugin = Plugin() - result = plugin._mismatch_between_length_of_raw_members_and_percentiles( + plu = Plugin() + result = plu._mismatch_between_length_of_raw_members_and_percentiles( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(expected, result.data) From a0f302233213541143e7d1e4ee7e4632c33ca36f Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 30 May 2017 09:29:53 +0100 Subject: [PATCH 0128/1367] Changes to ensemble_copula_coupling_constants.py following review. A single dictionary is now used to define both the bounds and the units. 
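A standalone sketch of the single-dictionary design this commit message describes; the names mirror the diff that follows:

```python
from collections import namedtuple

# One entry now carries both the bounds pairing and its units.
bounds = namedtuple("bounds", "value units")

bounds_for_ecdf = {"wind_speed": bounds((0, 50), "m s^-1")}

pairing = bounds_for_ecdf["wind_speed"]
print(pairing.value)  # (0, 50)
print(pairing.units)  # m s^-1
```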
--- lib/improver/constants.py | 3 +++ .../ensemble_copula_coupling.py | 2 -- .../ensemble_copula_coupling_constants.py | 22 +++++++++++-------- .../ensemble_copula_coupling_utilities.py | 13 +++++------ 4 files changed, 21 insertions(+), 19 deletions(-) diff --git a/lib/improver/constants.py b/lib/improver/constants.py index b02eb31917..f73bcbf040 100644 --- a/lib/improver/constants.py +++ b/lib/improver/constants.py @@ -32,3 +32,6 @@ # Real Missing Data Indicator RMDI = -32767.0 + +# 0 degrees Celsius expressed in Kelvin +ABSOLUTE_ZERO = 273.15 diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index bc54ac86b9..5c00b842f2 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -41,8 +41,6 @@ from improver.ensemble_calibration.ensemble_calibration_utilities import ( concatenate_cubes, convert_cube_data_to_2d, rename_coordinate) -from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ import (concatenate_2d_array_with_2d_array_endpoints, create_cube_with_percentiles, create_percentiles, diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py index cc84c58958..50a1e051a8 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py @@ -30,15 +30,19 @@ # POSSIBILITY OF SUCH DAMAGE. """Module to contain constants used for Ensemble Copula Coupling.""" +from collections import namedtuple + +from improver.constants import ABSOLUTE_ZERO + +# Define a namedtuple for use in the bounds_for_ecdf dictionary. +bounds = namedtuple("bounds", "value units") + # For the creation of an empirical cumulative distribution function, # the following dictionary specifies the end points of the distribution, # as a first approximation of likely climatological lower and upper bounds. -bounds_for_ecdf = {"air_temperature": (-40+273.15, 50+273.15), - "wind_speed": (0, 50), - "air_pressure_at_sea_level": (94000, 107000)} - -# Specify the units for the end points of the distribution for each phenomenon. -# SI units are used exclusively. -units_of_bounds_for_ecdf = {"air_temperature": "Kelvin", - "wind_speed": "m s^-1", - "air_pressure_at_sea_level": "Pa"} +# The units for the end points of the distribution are specified for each +# phenomenon. SI units are used exclusively.
+bounds_for_ecdf = { + "air_temperature": bounds((-40+ABSOLUTE_ZERO, 50+ABSOLUTE_ZERO), "Kelvin"), + "wind_speed": bounds((0, 50), "m s^-1"), + "air_pressure_at_sea_level": bounds((94000, 107000), "Pa")} diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 8e8d1b8038..3319ebb858 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -42,7 +42,7 @@ from iris.exceptions import CoordinateNotFoundError from improver.ensemble_copula_coupling.ensemble_copula_coupling_constants \ - import bounds_for_ecdf, units_of_bounds_for_ecdf + import bounds_for_ecdf def concatenate_2d_array_with_2d_array_endpoints( @@ -211,16 +211,13 @@ def get_bounds_of_distribution(cube_name, cube_units): """ # Extract bounds from dictionary of constants. try: - bounds_pairing = bounds_for_ecdf[cube_name] - bounds_pairing_units = ( - units_of_bounds_for_ecdf[cube_name]) + bounds_pairing = bounds_for_ecdf[cube_name].value + bounds_pairing_units = bounds_for_ecdf[cube_name].units except KeyError as err: msg = ("The forecast_cube name: {} is not recognised" - "within bounds_for_ecdf {} or " - "units_of_bounds_for_ecdf: {}. \n" + "within bounds_for_ecdf {}. \n" "Error: {}".format( - cube_name, bounds_for_ecdf, - units_of_bounds_for_ecdf, err)) + cube_name, bounds_for_ecdf, err)) raise KeyError(msg) bounds_pairing_units = unit.Unit(bounds_pairing_units) bounds_pairing = bounds_pairing_units.convert( From d17a4545ab4ef4a45f0cbadbf84a9bac035758cf Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Tue, 30 May 2017 17:34:52 +0100 Subject: [PATCH 0129/1367] Edits following review of ensemble_copula_coupling_utilities. Work done to try to make reshape_array_to_have_probabilistic_dimension_at_the_front more robust for more general usage. 
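With value and units held together, the lookup in get_bounds_of_distribution above reduces to reading both fields and converting units. A standalone sketch of the conversion step, using the air_temperature values from the dictionary:

```python
import numpy as np
from cf_units import Unit

# (-40 + 273.15, 50 + 273.15) Kelvin, converted to the degrees Celsius
# of a hypothetical forecast cube, recovers the original (-40, 50).
bounds_pairing = (233.15, 323.15)
converted = Unit("Kelvin").convert(np.array(bounds_pairing),
                                   Unit("degreesC"))
print(converted)  # [-40.  50.]
```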
--- .../ensemble_calibration.py | 14 +++- .../ensemble_calibration_utilities.py | 38 +++++++++++ .../ensemble_copula_coupling.py | 28 ++++++-- .../ensemble_copula_coupling_utilities.py | 55 +++++++++------ ...alibration_EnsembleCalibrationUtilities.py | 67 ++++++++++++++++++- ...oupling_EnsembleCopulaCouplingUtilities.py | 36 +++++++--- 6 files changed, 201 insertions(+), 37 deletions(-) diff --git a/lib/improver/ensemble_calibration/ensemble_calibration.py b/lib/improver/ensemble_calibration/ensemble_calibration.py index bb9ac5a096..901526d8e0 100644 --- a/lib/improver/ensemble_calibration/ensemble_calibration.py +++ b/lib/improver/ensemble_calibration/ensemble_calibration.py @@ -42,8 +42,9 @@ import iris from improver.ensemble_calibration.ensemble_calibration_utilities import ( - convert_cube_data_to_2d, concatenate_cubes, rename_coordinate, - check_predictor_of_mean_flag) + convert_cube_data_to_2d, concatenate_cubes, + ensure_dimension_is_the_first_dimension, + rename_coordinate, check_predictor_of_mean_flag) class ContinuousRankedProbabilityScoreMinimisers(object): @@ -162,6 +163,9 @@ def calculate_percentage_change_in_last_iteration(allvecs): forecast_var_data = forecast_var.data.flatten() elif predictor_of_mean_flag.lower() in ["members"]: truth_data = truth.data.flatten() + forecast_predictor = ( + ensure_dimension_is_the_first_dimension( + forecast_predictor, "realization")) forecast_predictor_data = convert_cube_data_to_2d( forecast_predictor) forecast_var_data = forecast_var.data.flatten() @@ -443,6 +447,9 @@ def compute_initial_guess( elif predictor_of_mean_flag.lower() in ["members"]: if self.statsmodels_found: truth_data = truth.data.flatten() + forecast_predictor = ( + ensure_dimension_is_the_first_dimension( + forecast_predictor, "realization")) forecast_data = np.array( convert_cube_data_to_2d( forecast_predictor, transpose=False)) @@ -945,6 +952,9 @@ def _apply_params( beta = np.concatenate( [[optimised_coeffs_at_date["a"]], optimised_coeffs_at_date["beta"]**2]) + forecast_predictor = ( + ensure_dimension_is_the_first_dimension( + forecast_predictor, "realization")) forecast_predictor_flat = ( convert_cube_data_to_2d( forecast_predictor_at_date)) diff --git a/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py b/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py index 416a718527..5c4e867a75 100644 --- a/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py +++ b/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py @@ -68,6 +68,44 @@ def convert_cube_data_to_2d( return np.array(forecast_data) +def ensure_dimension_is_the_first_dimension(cube, coord): + """ + Function to ensure that the requested coordinate within the cube is + the first dimension within the cube. + + If the requested dimension coordinate exists, the cube is transposed. + If the requested coordinate exists, but it is not a dimension coordinate + i.e. a scalar coordinate, then a new axis is created with the scalar + coordinate becoming a dimension coordinate. + If the coordinate is not present on the cube, then an error is raised. + + Parameters + ---------- + cube : Iris cube + Cube where the requirement for the required dimension to be the first + dimension will be enforced. + coord : String + Name of the coordinate that is to be made the first dimension + coordinate in the cube. 
+ + """ + if cube.coords(coord, dim_coords=True): + if cube.coord_dims(coord)[0] != 0: + coords = [] + for acoord in cube.coords(dim_coords=True): + if acoord.name() not in [coord]: + coords.append(cube.coord_dims(acoord)[0]) + first_coord = cube.coord_dims(coord)[0] + cube.transpose([first_coord]+coords) + elif cube.coords(coord, dim_coords=False): + cube = iris.util.new_axis(cube, coord) + else: + msg = ("The coordinate {} is not a dimension coordinate " + "in the cube: {}".format(coord, cube)) + raise ValueError(msg) + return cube + + def concatenate_cubes( cubes, coords_to_slice_over=["realization", "time"], master_coord="time", diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 5c00b842f2..1ee5a1582d 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -40,10 +40,11 @@ from iris.exceptions import CoordinateNotFoundError from improver.ensemble_calibration.ensemble_calibration_utilities import ( - concatenate_cubes, convert_cube_data_to_2d, rename_coordinate) + concatenate_cubes, convert_cube_data_to_2d, + ensure_dimension_is_the_first_dimension, rename_coordinate) from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ import (concatenate_2d_array_with_2d_array_endpoints, - create_cube_with_percentiles, create_percentiles, + create_cube_with_percentiles, choose_set_of_percentiles, get_bounds_of_distribution, insert_lower_and_upper_endpoint_to_1d_array, reshape_array_to_have_probabilistic_dimension_at_the_front) @@ -177,6 +178,11 @@ def _sample_percentiles( original_percentiles = ( forecast_at_percentiles.coord("percentile").points) + # Ensure that the percentile dimension is first, so that the + # conversion to a 2d array produces data in the desired order. + forecast_at_percentiles = ( + ensure_dimension_is_the_first_dimension( + forecast_at_percentiles, "percentile")) forecast_at_reshaped_percentiles = convert_cube_data_to_2d( forecast_at_percentiles, coord="percentile") @@ -249,7 +255,7 @@ def process(self, forecast_at_percentiles, no_of_percentiles=None, no_of_percentiles = ( len(forecast_at_percentiles.coord("percentile").points)) - percentiles = create_percentiles( + percentiles = choose_set_of_percentiles( no_of_percentiles, sampling=sampling) cube_units = forecast_at_percentiles.units @@ -357,6 +363,11 @@ def _probabilities_to_percentiles( threshold_points = ( forecast_probabilities.coord("probability_above_threshold").points) + # Ensure that the percentile dimension is first, so that the + # conversion to a 2d array produces data in the desired order. 
+ forecast_probabilities = ( + ensure_dimension_is_the_first_dimension( + forecast_probabilities, "probability_above_threshold")) prob_slices = convert_cube_data_to_2d( forecast_probabilities, coord="probability_above_threshold") @@ -440,7 +451,7 @@ def process(self, forecast_probabilities, no_of_percentiles=None, len(forecast_probabilities.coord( "probability_above_threshold").points)) - percentiles = create_percentiles( + percentiles = choose_set_of_percentiles( no_of_percentiles, sampling=sampling) cube_units = ( @@ -500,6 +511,13 @@ def _mean_and_variance_to_percentiles( calibrated_forecast_variance = iris.util.new_axis( calibrated_forecast_variance, "time") + calibrated_forecast_predictor = ( + ensure_dimension_is_the_first_dimension( + calibrated_forecast_predictor, "realization")) + calibrated_forecast_variance = ( + ensure_dimension_is_the_first_dimension( + calibrated_forecast_variance, "realization")) + calibrated_forecast_predictor_data = ( calibrated_forecast_predictor.data.flatten()) calibrated_forecast_variance_data = ( @@ -581,7 +599,7 @@ def process(self, calibrated_forecast_predictor_and_variance, no_of_percentiles = len( raw_forecast_members.coord("realization").points) - percentiles = create_percentiles(no_of_percentiles) + percentiles = choose_set_of_percentiles(no_of_percentiles) calibrated_forecast_percentiles = ( self._mean_and_variance_to_percentiles( calibrated_forecast_predictor, diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 3319ebb858..f98a740eb3 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -49,6 +49,8 @@ def concatenate_2d_array_with_2d_array_endpoints( array_2d, low_endpoint, high_endpoint): """ For a 2d array, add a 2d array as the lower and upper endpoints. + The concatenation to add the lower and upper endpoints to the 2d array + are performed along the second (index 1) dimension. Parameters ---------- @@ -75,7 +77,7 @@ def concatenate_2d_array_with_2d_array_endpoints( return array_2d -def create_percentiles(no_of_percentiles, sampling="quantile"): +def choose_set_of_percentiles(no_of_percentiles, sampling="quantile"): """ Function to create percentiles. @@ -107,11 +109,15 @@ def create_percentiles(no_of_percentiles, sampling="quantile"): """ if sampling in ["quantile"]: + # Generate percentiles from 1/N+1 to N/N+1. percentiles = np.linspace( 1/float(1+no_of_percentiles), no_of_percentiles/float(1+no_of_percentiles), no_of_percentiles).tolist() elif sampling in ["random"]: + # Generate percentiles from 1/N+1 to N/N+1. + # Random sampling doesn't currently sample the ends of the + # distribution i.e. 0 to 1/N+1 and N/N+1 to 1. percentiles = [] for _ in range(no_of_percentiles): percentiles.append( @@ -130,7 +136,7 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): """ Create a cube with a percentile coordinate based on a template cube. The resulting cube will have an extra percentile coordinate compared with - the input cube. The shape of the cube_data should be the shape of the + the template cube. The shape of the cube_data should be the shape of the desired output cube. Parameters @@ -140,6 +146,7 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): as the first dimension of cube_data. template_cube : Iris cube Cube to copy all coordinates from. 
+ The template_cube does not contain any existing percentile coordinate. Metadata is also copied from this cube. cube_data : Numpy array Data to insert into the template cube. @@ -183,22 +190,21 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): return result -def get_bounds_of_distribution(cube_name, cube_units): +def get_bounds_of_distribution(bounds_pairing_key, desired_units): """ Gets the bounds of the distribution and converts the units of the - bounds_pairing to the units of the forecast. + bounds_pairing to the desired_units. This method gets the bounds values and units from the imported dictionaries: bounds_for_ecdf and units_of_bounds_for_ecdf. - The units of the bounds are converted to be the units of the input - cube. + The units of the bounds are converted to be the desired units. Parameters ---------- - cube_name : String - Name of cube, which is used as the key for the bounds_for_ecdf and - units_of_bounds_for_ecdf dictionaries. - cube_units : cf_units.Unit + bounds_pairing_key : String + Name of key to be used for the bounds_for_ecdf dictionary, in order + to get the desired bounds_pairing. + desired_units : cf_units.Unit Units to which the bounds_pairing will be converted. Returns @@ -206,22 +212,22 @@ def get_bounds_of_distribution(cube_name, cube_units): bounds_pairing : Tuple Lower and upper bound to be used as the ends of the empirical cumulative distribution function, converted to have - the same units as the input cube. + the desired units. """ # Extract bounds from dictionary of constants. try: - bounds_pairing = bounds_for_ecdf[cube_name].value - bounds_pairing_units = bounds_for_ecdf[cube_name].units + bounds_pairing = bounds_for_ecdf[bounds_pairing_key].value + bounds_pairing_units = bounds_for_ecdf[bounds_pairing_key].units except KeyError as err: - msg = ("The forecast_cube name: {} is not recognised" + msg = ("The bounds_pairing_key: {} is not recognised " "within bounds_for_ecdf {}. \n" "Error: {}".format( - cube_name, bounds_for_ecdf, err)) + bounds_pairing_key, bounds_for_ecdf, err)) raise KeyError(msg) bounds_pairing_units = unit.Unit(bounds_pairing_units) bounds_pairing = bounds_pairing_units.convert( - np.array(bounds_pairing), cube_units) + np.array(bounds_pairing), desired_units) return bounds_pairing @@ -280,9 +286,18 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( shape_to_reshape_to = list(original_cube.shape) if original_cube.coords( input_probabilistic_dimension_name, dim_coords=True): - pat_coord_position = ( - original_cube.coord_dims(input_probabilistic_dimension_name)) - shape_to_reshape_to.pop(pat_coord_position[0]) + if original_cube.coord_dims( + input_probabilistic_dimension_name)[0] == 0: + pat_coord_position = ( + original_cube.coord_dims(input_probabilistic_dimension_name)) + shape_to_reshape_to.pop(pat_coord_position[0]) + else: + msg = ("The {} coordinate is a dimension coordinate but is not " + "the first dimension coordinate in the cube: {}.\n" + "The ensure_dimension_is_the_first_dimension function " + "may be useful. 
".format( + input_probabilistic_dimension_name, original_cube)) + raise ValueError(msg) elif original_cube.coords( input_probabilistic_dimension_name, dim_coords=False): pass @@ -291,5 +306,5 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( input_probabilistic_dimension_name, original_cube)) raise CoordinateNotFoundError(msg) shape_to_reshape_to = ( - [output_probabilistic_dimension_length] + shape_to_reshape_to) + [output_probabilistic_dimension_length] + shape_to_reshape_to) return array_to_reshape.reshape(shape_to_reshape_to) diff --git a/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py b/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py index a01b458bfb..190a5050e8 100644 --- a/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py +++ b/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py @@ -44,8 +44,8 @@ import numpy as np from improver.ensemble_calibration.ensemble_calibration_utilities import ( - convert_cube_data_to_2d, concatenate_cubes, - _associate_any_coordinate_with_master_coordinate, + convert_cube_data_to_2d, ensure_dimension_is_the_first_dimension, + concatenate_cubes, _associate_any_coordinate_with_master_coordinate, _slice_over_coordinate, _strip_var_names, rename_coordinate, _renamer, check_predictor_of_mean_flag) from improver.tests.helper_functions_ensemble_calibration import( @@ -197,6 +197,69 @@ def test_5d_cube(self): self.assertArrayAlmostEqual(result, data) +class Test_ensure_dimension_is_the_first_dimension(IrisTest): + + """ + Test the ensure_dimension_is_the_first_dimension + utility. + """ + + def setUp(self): + """Use temperature cube to test with.""" + self.cube = set_up_temperature_cube() + + def test_basic(self): + """Test that the function returns an iris.cube.Cube.""" + result = ( + ensure_dimension_is_the_first_dimension(self.cube, "realization")) + self.assertIsInstance(result, Cube) + + def test_if_probabilistic_dimension_is_first(self): + """ + Test that a cube with the expected data contents is returned when + the probabilistic dimension is the first dimension coordinate. + """ + result = ( + ensure_dimension_is_the_first_dimension(self.cube, "realization")) + self.assertArrayAlmostEqual(result.data, self.cube.data) + + def test_if_probabilistic_dimension_is_not_first(self): + """ + Test that a cube with the expected data contents is returned when + the probabilistic dimension is a dimension coordinate but it is + not the first dimension coordinate,. + """ + expected = self.cube.copy() + expected.transpose([0, 3, 2, 1]) + + cube = self.cube.copy() + cube.transpose([3, 2, 1, 0]) + result = ( + ensure_dimension_is_the_first_dimension(cube, "realization")) + self.assertArrayAlmostEqual(result.data, expected.data) + + def test_if_probabilistic_dimension_is_scalar(self): + """ + Test that a cube with the expected data contents is returned when + the probabilistic dimension is a scalar coordinate. + """ + cube = self.cube[0, :, :, :] + result = ( + ensure_dimension_is_the_first_dimension(cube, "realization")) + self.assertArrayAlmostEqual(result.data, [cube.data]) + + def test_if_probabilistic_dimension_not_available(self): + """ + Test that the expected error message is raised when the required + probabilistic dimension is not available in the cube. 
+ """ + cube = self.cube[0, :, :, :] + cube.remove_coord("realization") + msg = "not a dimension coordinate" + with self.assertRaisesRegexp(ValueError, msg): + ensure_dimension_is_the_first_dimension(cube, "realization") + + class Test_concatenate_cubes(IrisTest): """Test the concatenate_cubes utility.""" diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 0e26e03704..86336ed4f2 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -42,7 +42,7 @@ import numpy as np from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ - import (create_percentiles, create_cube_with_percentiles, + import (choose_set_of_percentiles, create_cube_with_percentiles, insert_lower_and_upper_endpoint_to_1d_array, concatenate_2d_array_with_2d_array_endpoints, get_bounds_of_distribution, @@ -263,9 +263,9 @@ def test_coordinate_copy(self): raise CoordinateNotFoundError(msg) -class Test_create_percentiles(IrisTest): +class Test_choose_set_of_percentiles(IrisTest): - """Test the create_percentiles plugin.""" + """Test the choose_set_of_percentiles plugin.""" def test_basic(self): """ @@ -273,7 +273,7 @@ def test_basic(self): percentiles. """ no_of_percentiles = 3 - result = create_percentiles(no_of_percentiles) + result = choose_set_of_percentiles(no_of_percentiles) self.assertIsInstance(result, list) self.assertEqual(len(result), no_of_percentiles) @@ -284,7 +284,7 @@ def test_data(self): """ data = np.array([0.25, 0.5, 0.75]) no_of_percentiles = 3 - result = create_percentiles(no_of_percentiles) + result = choose_set_of_percentiles(no_of_percentiles) self.assertArrayAlmostEqual(result, data) def test_random(self): @@ -293,7 +293,8 @@ def test_random(self): percentiles, if the random sampling option is selected. """ no_of_percentiles = 3 - result = create_percentiles(no_of_percentiles, sampling="random") + result = choose_set_of_percentiles( + no_of_percentiles, sampling="random") self.assertIsInstance(result, list) self.assertEqual(len(result), no_of_percentiles) @@ -305,7 +306,7 @@ def test_unknown_sampling_option(self): no_of_percentiles = 3 msg = "The unknown sampling option is not yet implemented" with self.assertRaisesRegexp(ValueError, msg): - create_percentiles(no_of_percentiles, sampling="unknown") + choose_set_of_percentiles(no_of_percentiles, sampling="unknown") class Test_get_bounds_of_distribution(IrisTest): @@ -354,7 +355,7 @@ def test_check_exception_is_raised(self): """ cube_name = "nonsense" cube_units = Unit("degreesC") - msg = "The forecast_cube name" + msg = "The bounds_pairing_key" with self.assertRaisesRegexp(KeyError, msg): get_bounds_of_distribution(cube_name, cube_units) @@ -428,6 +429,7 @@ def test_percentile_is_dimension_coordinate(self): """ Test that the result have the expected size for the probabilistic dimension and is generally of the expected size. + The array contents is also checked. 
""" cube = self.current_temperature_forecast_cube plen = len(cube.coord("percentile").points) @@ -438,10 +440,25 @@ def test_percentile_is_dimension_coordinate(self): self.assertEqual(reshaped_array.shape, cube.data.shape) self.assertArrayAlmostEqual(reshaped_array, cube.data) + def test_if_percentile_is_not_first_dimension_coordinate(self): + """ + Test that the result have the expected size for the + probabilistic dimension and is generally of the expected size. + The array contents is also checked. + """ + cube = self.current_temperature_forecast_cube + cube.transpose([3, 2, 1, 0]) + plen = len(cube.coord("percentile").points) + msg = "coordinate is a dimension coordinate but is not" + with self.assertRaisesRegexp(ValueError, msg): + reshape_array_to_have_probabilistic_dimension_at_the_front( + cube.data, cube, "percentile", plen) + def test_percentile_is_not_dimension_coordinate(self): """ Test the array size, if the percentile coordinate is not a dimension coordinate on the cube. + The array contents is also checked. """ expected = np.array([[[[226.15, 237.4, 248.65], [259.9, 271.15, 282.4], @@ -462,6 +479,7 @@ def test_percentile_is_dimension_coordinate_multiple_timesteps(self): """ Test that the data has been reshaped correctly when multiple timesteps are in the cube. + The array contents is also checked. """ expected = np.array([[[[4., 4.71428571], [5.42857143, 6.14285714]], @@ -490,6 +508,7 @@ def test_percentile_is_dimension_coordinate_flattened_data(self): """ Test the array size, if the percentile coordinate is not a dimension coordinate on the cube. + The array contents is also checked. """ cube = self.current_temperature_forecast_cube flattened_data = cube.data.flatten() @@ -504,6 +523,7 @@ def test_percentile_is_dimension_coordinate_flattened_data(self): def test_missing_coordinate(self): """ Basic test that the result is a numpy array with the expected contents. + The array contents is also checked. """ cube = self.current_temperature_forecast_cube plen = len(cube.coord("percentile").points) From ea539e1cd76a9d7d84dc32088247db86209bc02d Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Wed, 31 May 2017 16:15:52 +0100 Subject: [PATCH 0130/1367] Edits following detailed review comments on ensemble_copula_coupling and ensemble_calibration_utilities. 
--- .../ensemble_calibration.py | 8 +- .../ensemble_calibration_utilities.py | 5 +- .../ensemble_copula_coupling.py | 119 +++++++++--------- .../ensemble_copula_coupling_constants.py | 5 + .../ensemble_copula_coupling_utilities.py | 4 +- ...alibration_EnsembleCalibrationUtilities.py | 16 +-- ...mble_copula_coupling_EnsembleReordering.py | 25 ++-- ..._GeneratePercentilesFromMeanAndVariance.py | 7 +- ...la_coupling_RebadgePercentilesAsMembers.py | 30 ++++- ...ble_copula_coupling_ResamplePercentiles.py | 25 ++-- 10 files changed, 146 insertions(+), 98 deletions(-) diff --git a/lib/improver/ensemble_calibration/ensemble_calibration.py b/lib/improver/ensemble_calibration/ensemble_calibration.py index 901526d8e0..c2412e8913 100644 --- a/lib/improver/ensemble_calibration/ensemble_calibration.py +++ b/lib/improver/ensemble_calibration/ensemble_calibration.py @@ -43,7 +43,7 @@ from improver.ensemble_calibration.ensemble_calibration_utilities import ( convert_cube_data_to_2d, concatenate_cubes, - ensure_dimension_is_the_first_dimension, + ensure_dimension_is_the_zeroth_dimension, rename_coordinate, check_predictor_of_mean_flag) @@ -164,7 +164,7 @@ def calculate_percentage_change_in_last_iteration(allvecs): elif predictor_of_mean_flag.lower() in ["members"]: truth_data = truth.data.flatten() forecast_predictor = ( - ensure_dimension_is_the_first_dimension( + ensure_dimension_is_the_zeroth_dimension( forecast_predictor, "realization")) forecast_predictor_data = convert_cube_data_to_2d( forecast_predictor) @@ -448,7 +448,7 @@ def compute_initial_guess( if self.statsmodels_found: truth_data = truth.data.flatten() forecast_predictor = ( - ensure_dimension_is_the_first_dimension( + ensure_dimension_is_the_zeroth_dimension( forecast_predictor, "realization")) forecast_data = np.array( convert_cube_data_to_2d( @@ -953,7 +953,7 @@ def _apply_params( [[optimised_coeffs_at_date["a"]], optimised_coeffs_at_date["beta"]**2]) forecast_predictor = ( - ensure_dimension_is_the_first_dimension( + ensure_dimension_is_the_zeroth_dimension( forecast_predictor, "realization")) forecast_predictor_flat = ( convert_cube_data_to_2d( diff --git a/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py b/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py index 5c4e867a75..85b4b2947a 100644 --- a/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py +++ b/lib/improver/ensemble_calibration/ensemble_calibration_utilities.py @@ -52,7 +52,10 @@ def convert_cube_data_to_2d( The data will be flattened along this coordinate. transpose : Logical If True, the resulting flattened data is transposed. + This will transpose a 2d array of the format [:, coord] + to [coord, :]. If False, the resulting flattened data is not transposed. + This will result in a 2d array of format [:, coord]. Returns ------- @@ -68,7 +71,7 @@ def convert_cube_data_to_2d( return np.array(forecast_data) -def ensure_dimension_is_the_first_dimension(cube, coord): +def ensure_dimension_is_the_zeroth_dimension(cube, coord): """ Function to ensure that the requested coordinate within the cube is the first dimension within the cube. 
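The [:, coord] and [coord, :] notation added to the convert_cube_data_to_2d docstring can be pinned down with a toy array; this is purely illustrative, not the helper itself:

```python
import numpy as np

# Three realizations of four grid points: flattening each realization
# and stacking gives the [coord, :] layout; its transpose is [:, coord].
per_realization = [np.arange(4.0) + 10 * i for i in range(3)]
coord_first = np.array(per_realization)  # [coord, :]
points_first = coord_first.T             # [:, coord]
print(coord_first.shape)   # (3, 4)
print(points_first.shape)  # (4, 3)
```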
diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py index 1ee5a1582d..52ea6fabe8 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py @@ -41,7 +41,7 @@ from improver.ensemble_calibration.ensemble_calibration_utilities import ( concatenate_cubes, convert_cube_data_to_2d, - ensure_dimension_is_the_first_dimension, rename_coordinate) + ensure_dimension_is_the_zeroth_dimension) from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \ import (concatenate_2d_array_with_2d_array_endpoints, create_cube_with_percentiles, choose_set_of_percentiles, @@ -64,7 +64,7 @@ def __init__(self): pass @staticmethod - def process(cube): + def process(cube, ensemble_member_numbers=None): """ Rebadge percentiles as ensemble members. The ensemble member numbering will depend upon the number of percentiles in the input cube i.e. @@ -82,8 +82,11 @@ def process(cube): "the input cube: {}.".format(cube)) raise CoordinateNotFoundError(msg) - plen = len(cube.coord("percentile").points) - cube.coord("percentile").points = np.arange(plen) + if ensemble_member_numbers is None: + ensemble_member_numbers = ( + np.arange(len(cube.coord("percentile").points))) + + cube.coord("percentile").points = ensemble_member_numbers cube.coord("percentile").rename("realization") return cube @@ -121,7 +124,7 @@ def _add_bounds_to_percentiles_and_forecast_at_percentiles( Array containing the underlying forecast values at each percentile. bounds_pairing : Tuple Lower and upper bound to be used as the ends of the - empirical cumulative distribution function. + cumulative distribution function. Returns ------- percentiles : Numpy array @@ -136,10 +139,10 @@ def _add_bounds_to_percentiles_and_forecast_at_percentiles( forecast_at_percentiles, lower_bound, upper_bound) if np.any(np.diff(forecast_at_percentiles) < 0): msg = ("The end points added to the forecast at percentiles " - "values representing for each percentile must result in " + "values representing each percentile must result in " "an ascending order. " "In this case, the forecast at percentile values {} " - "must be outside the allowable range given by the " + "is outside the allowable range given by the " "bounds {}".format( forecast_at_percentiles, bounds_pairing)) raise ValueError(msg) @@ -149,7 +152,7 @@ def _add_bounds_to_percentiles_and_forecast_at_percentiles( raise ValueError(msg) return percentiles, forecast_at_percentiles - def _sample_percentiles( + def _interpolate_percentiles( self, forecast_at_percentiles, desired_percentiles, bounds_pairing): """ @@ -166,7 +169,7 @@ def _sample_percentiles( Array of the desired percentiles. bounds_pairing : Tuple Lower and upper bound to be used as the ends of the - empirical cumulative distribution function. + cumulative distribution function. Returns ------- @@ -181,7 +184,7 @@ def _sample_percentiles( # Ensure that the percentile dimension is first, so that the # conversion to a 2d array produces data in the desired order. 
forecast_at_percentiles = ( - ensure_dimension_is_the_first_dimension( + ensure_dimension_is_the_zeroth_dimension( forecast_at_percentiles, "percentile")) forecast_at_reshaped_percentiles = convert_cube_data_to_2d( forecast_at_percentiles, coord="percentile") @@ -213,7 +216,6 @@ def _sample_percentiles( break percentile_cube = create_cube_with_percentiles( desired_percentiles, template_cube, forecast_at_percentiles_data) - percentile_cube.cell_methods = {} return percentile_cube def process(self, forecast_at_percentiles, no_of_percentiles=None, @@ -247,6 +249,7 @@ def process(self, forecast_at_percentiles, no_of_percentiles=None, ------- forecast_at_percentiles : Iris cube Cube with forecast values at the desired set of percentiles. + The percentile coordinate is always the zeroth dimension. """ forecast_at_percentiles = concatenate_cubes(forecast_at_percentiles) @@ -263,7 +266,7 @@ def process(self, forecast_at_percentiles, no_of_percentiles=None, get_bounds_of_distribution( forecast_at_percentiles.name(), cube_units)) - forecast_at_percentiles = self._sample_percentiles( + forecast_at_percentiles = self._interpolate_percentiles( forecast_at_percentiles, percentiles, bounds_pairing) return forecast_at_percentiles @@ -305,11 +308,11 @@ def _add_bounds_to_thresholds_and_probabilities( Array of threshold values used to calculate the probabilities. probabilities_for_cdf : Numpy array Array containing the probabilities used for constructing an - empirical cumulative distribution function i.e. probabilities + cumulative distribution function i.e. probabilities below threshold. bounds_pairing : Tuple Lower and upper bound to be used as the ends of the - empirical cumulative distribution function. + cumulative distribution function. Returns ------- threshold_points : Numpy array @@ -338,7 +341,7 @@ def _probabilities_to_percentiles( self, forecast_probabilities, percentiles, bounds_pairing): """ Conversion of probabilities to percentiles through the construction - of an empirical cumulative distribution function. This is effectively + of an cumulative distribution function. This is effectively constructed by linear interpolation from the probabilities associated with each threshold to a set of percentiles. @@ -351,7 +354,7 @@ def _probabilities_to_percentiles( calculated. bounds_pairing : Tuple Lower and upper bound to be used as the ends of the - empirical cumulative distribution function. + cumulative distribution function. Returns ------- @@ -366,7 +369,7 @@ def _probabilities_to_percentiles( # Ensure that the percentile dimension is first, so that the # conversion to a 2d array produces data in the desired order. forecast_probabilities = ( - ensure_dimension_is_the_first_dimension( + ensure_dimension_is_the_zeroth_dimension( forecast_probabilities, "probability_above_threshold")) prob_slices = convert_cube_data_to_2d( forecast_probabilities, coord="probability_above_threshold") @@ -406,7 +409,6 @@ def _probabilities_to_percentiles( break percentile_cube = create_cube_with_percentiles( percentiles, template_cube, forecast_at_percentiles) - percentile_cube.cell_methods = {} return percentile_cube def process(self, forecast_probabilities, no_of_percentiles=None, @@ -415,7 +417,7 @@ def process(self, forecast_probabilities, no_of_percentiles=None, 1. Concatenates cubes with a probability_above_threshold coordinate. 2. Creates a list of percentiles. 3. Accesses the lower and upper bound pair to find the ends of the - empirical cumulative distribution function. + cumulative distribution function. 
4. Convert the probability_above_threshold coordinate into values at a set of percentiles using linear interpolation, see Figure 1 from Flowerdew, 2014. @@ -442,6 +444,7 @@ def process(self, forecast_probabilities, no_of_percentiles=None, ------- forecast_at_percentiles : Iris cube Cube with forecast values at the desired set of percentiles. + The threshold coordinate is always the zeroth dimension. """ forecast_probabilities = concatenate_cubes(forecast_probabilities) @@ -504,18 +507,11 @@ def _mean_and_variance_to_percentiles( percentiles requested. """ - if not calibrated_forecast_predictor.coord_dims("time"): - calibrated_forecast_predictor = iris.util.new_axis( - calibrated_forecast_predictor, "time") - if not calibrated_forecast_variance.coord_dims("time"): - calibrated_forecast_variance = iris.util.new_axis( - calibrated_forecast_variance, "time") - calibrated_forecast_predictor = ( - ensure_dimension_is_the_first_dimension( + ensure_dimension_is_the_zeroth_dimension( calibrated_forecast_predictor, "realization")) calibrated_forecast_variance = ( - ensure_dimension_is_the_first_dimension( + ensure_dimension_is_the_zeroth_dimension( calibrated_forecast_variance, "realization")) calibrated_forecast_predictor_data = ( @@ -561,12 +557,13 @@ def _mean_and_variance_to_percentiles( break percentile_cube = create_cube_with_percentiles( percentiles, template_cube, result) - + # Remove cell methods aimed at removing cell methods associated with + # finding the ensemble mean, which are no longer relevant. percentile_cube.cell_methods = {} return percentile_cube def process(self, calibrated_forecast_predictor_and_variance, - raw_forecast): + no_of_percentiles): """ Generate ensemble percentiles from the mean and variance. @@ -583,6 +580,7 @@ def process(self, calibrated_forecast_predictor_and_variance, ------- calibrated_forecast_percentiles : Iris cube Cube for calibrated percentiles. + The percentile coordinate is always the zeroth dimension. """ (calibrated_forecast_predictor, calibrated_forecast_variance) = ( @@ -592,12 +590,6 @@ def process(self, calibrated_forecast_predictor_and_variance, calibrated_forecast_predictor) calibrated_forecast_variance = concatenate_cubes( calibrated_forecast_variance) - rename_coordinate( - raw_forecast, "ensemble_member_id", "realization") - raw_forecast_members = concatenate_cubes(raw_forecast) - - no_of_percentiles = len( - raw_forecast_members.coord("realization").points) percentiles = choose_set_of_percentiles(no_of_percentiles) calibrated_forecast_percentiles = ( @@ -612,8 +604,8 @@ def process(self, calibrated_forecast_predictor_and_variance, class EnsembleReordering(object): """ Plugin for applying the reordering step of Ensemble Copula Coupling, - in order to generate ensemble members from percentiles. - The percentiles are assumed to be in ascending order. + in order to generate ensemble members with multivariate structure + from percentiles. The percentiles are assumed to be in ascending order. Reference: Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. @@ -627,14 +619,17 @@ def __init__(self): pass @staticmethod - def _mismatch_between_length_of_raw_members_and_percentiles( + def _recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members): """ Function to determine whether there is a mismatch between the number of percentiles and the number of raw forecast members. If more percentiles are requested than ensemble members, then the ensemble - members are recycled. 
If fewer percentiles are requested than - ensemble members, then only the first n ensemble members are used. + members are recycled. This assumes that the identity of the ensemble + members within the raw ensemble forecast is random, such that the + raw ensemble members are exchangeable. If fewer percentiles are + requested than ensemble members, then only the first n ensemble + members are used. Parameters ---------- @@ -666,11 +661,16 @@ def _mismatch_between_length_of_raw_members_and_percentiles( # numbers are recycled e.g. 1, 2, 3, 1, 2, 3, etc. for index in range(plen): realization_list.append(mpoints[index % len(mpoints)]) + + # Assume that the ensemble members are ascending linearly. + new_member_numbers = realization_list[0] + range(plen) + # Extract the members required in the realization_list from # the raw_forecast_members. Edit the member number as appropriate # and append to a cubelist containing rebadged raw ensemble # members. - for realization, index in zip(realization_list, range(plen)): + for realization, index in zip( + realization_list, new_member_numbers): constr = iris.Constraint(realization=realization) raw_forecast_member = raw_forecast_members.extract(constr) raw_forecast_member.coord("realization").points = index @@ -695,6 +695,8 @@ def rank_ecc( to be in ascending order. raw_forecast_members : cube Cube containing the raw (not post-processed) forecasts. + The probabilistic dimension is assumed to be the zeroth + dimension. random_ordering : Logical If random_ordering is True, the post-processed forecasts are reordered randomly, rather than using the ordering of the @@ -713,18 +715,19 @@ def rank_ecc( raw_forecast_members.slices_over("time"), post_processed_forecast_percentiles.slices_over("time")): random_data = np.random.random(rawfc.data.shape) - # Lexsort returns the indices sorted firstly by the - # primary key, the raw forecast data (unless random_ordering - # is enabled), and secondly by the secondary key, an array of - # random data, in order to split tied values randomly. if random_ordering: - fake_rawfc_data = np.random.random(rawfc.data.shape) - sorting_index = ( - np.lexsort((random_data, fake_rawfc_data), axis=0)) + # Returns the indices that would sort the array. + # As these indices are from a random dataset, only an argsort + # is used. + ranking = np.argsort(random_data, axis=0) else: + # Lexsort returns the indices sorted firstly by the + # primary key, the raw forecast data (unless random_ordering + # is enabled), and secondly by the secondary key, an array of + # random data, in order to split tied values randomly. sorting_index = np.lexsort((random_data, rawfc.data), axis=0) - # Returns the indices that would sort the array. - ranking = np.argsort(sorting_index, axis=0) + # Returns the indices that would sort the array. + ranking = np.argsort(sorting_index, axis=0) # Index the post-processed forecast data using the ranking array. # np.choose allows indexing of a 3d array using a 3d array, calfc.data = np.choose(ranking, calfc.data) @@ -758,18 +761,22 @@ def process( are representative of a specified probability threshold across the whole domain. 
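The two steps reworked above, member recycling and the reordering itself, can be followed end to end on a toy column. A reduced sketch with one grid point and hypothetical values; np.arange stands in for the patch's range(plen):

```python
import numpy as np

# Recycling: three percentiles but only two raw members with
# realization points [12, 13]; members repeat as 12, 13, 12 and are
# then renumbered consecutively from the first point.
mpoints = np.array([12, 13])
plen = 3
realization_list = [mpoints[index % len(mpoints)] for index in range(plen)]
new_member_numbers = realization_list[0] + np.arange(plen)
print(realization_list)     # [12, 13, 12]
print(new_member_numbers)   # [12 13 14]

# Reordering: rank the raw member values (ties split by random keys),
# then index the ascending post-processed percentiles with that rank.
raw = np.array([[3.0], [1.0], [2.0]])            # raw ensemble column
calibrated = np.array([[10.0], [20.0], [30.0]])  # ascending percentiles
random_data = np.random.random(raw.shape)
sorting_index = np.lexsort((random_data, raw), axis=0)
ranking = np.argsort(sorting_index, axis=0)
print(np.choose(ranking, calibrated).ravel())  # [ 30.  10.  20.]
```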
""" - rename_coordinate( - raw_forecast, "ensemble_member_id", "realization") post_processed_forecast_percentiles = concatenate_cubes( post_processed_forecast, coords_to_slice_over=["percentile", "time"]) + post_processed_forecast_percentiles = ( + ensure_dimension_is_the_zeroth_dimension( + post_processed_forecast_percentiles, "percentile")) raw_forecast_members = concatenate_cubes(raw_forecast) + raw_forecast_members = ensure_dimension_is_the_zeroth_dimension( + raw_forecast_members, "realization") raw_forecast_members = ( - self._mismatch_between_length_of_raw_members_and_percentiles( + self._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members)) post_processed_forecast_members = self.rank_ecc( post_processed_forecast_percentiles, raw_forecast_members, random_ordering=random_ordering) - rename_coordinate( - post_processed_forecast_members, "percentile", "realization") + post_processed_forecast_members = ( + RebadgePercentilesAsMembers.process( + post_processed_forecast_members)) return post_processed_forecast_members diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py index 50a1e051a8..8c59209c1f 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py @@ -42,6 +42,11 @@ # as a first approximation of likely climatological lower and upper bounds. # The units for the end points of the distribution are specified for each # phenomenon. SI units are used exclusively. +# Scientific Reference: +# Flowerdew, J., 2014. +# Calibrated ensemble reliability whilst preserving spatial structure. +# Tellus Series A, Dynamic Meteorology and Oceanography, 66, 22662. 
+ bounds_for_ecdf = { "air_temperature": bounds((-40+ABSOLUTE_ZERO, 50+ABSOLUTE_ZERO), "Kelvin"), "wind_speed": bounds((0, 50), "m s^-1"), diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index f98a740eb3..7fb19413ba 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -287,7 +287,7 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( if original_cube.coords( input_probabilistic_dimension_name, dim_coords=True): if original_cube.coord_dims( - input_probabilistic_dimension_name)[0] == 0: + input_probabilistic_dimension_name)[0] == 0: pat_coord_position = ( original_cube.coord_dims(input_probabilistic_dimension_name)) shape_to_reshape_to.pop(pat_coord_position[0]) @@ -306,5 +306,5 @@ def reshape_array_to_have_probabilistic_dimension_at_the_front( input_probabilistic_dimension_name, original_cube)) raise CoordinateNotFoundError(msg) shape_to_reshape_to = ( - [output_probabilistic_dimension_length] + shape_to_reshape_to) + [output_probabilistic_dimension_length] + shape_to_reshape_to) return array_to_reshape.reshape(shape_to_reshape_to) diff --git a/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py b/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py index 190a5050e8..ea816cfa04 100644 --- a/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py +++ b/lib/improver/tests/test_ensemble_calibration_EnsembleCalibrationUtilities.py @@ -44,7 +44,7 @@ import numpy as np from improver.ensemble_calibration.ensemble_calibration_utilities import ( - convert_cube_data_to_2d, ensure_dimension_is_the_first_dimension, + convert_cube_data_to_2d, ensure_dimension_is_the_zeroth_dimension, concatenate_cubes, _associate_any_coordinate_with_master_coordinate, _slice_over_coordinate, _strip_var_names, rename_coordinate, _renamer, check_predictor_of_mean_flag) @@ -197,10 +197,10 @@ def test_5d_cube(self): self.assertArrayAlmostEqual(result, data) -class Test_ensure_dimension_is_the_first_dimension(IrisTest): +class Test_ensure_dimension_is_the_zeroth_dimension(IrisTest): """ - Test the ensure_dimension_is_the_first_dimension + Test the ensure_dimension_is_the_zeroth_dimension utility. """ @@ -211,7 +211,7 @@ def setUp(self): def test_basic(self): """Test that the function returns an iris.cube.Cube.""" result = ( - ensure_dimension_is_the_first_dimension(self.cube, "realization")) + ensure_dimension_is_the_zeroth_dimension(self.cube, "realization")) self.assertIsInstance(result, Cube) def test_if_probabilistic_dimension_is_first(self): @@ -220,7 +220,7 @@ def test_if_probabilistic_dimension_is_first(self): the probabilistic dimension is the first dimension coordinate. 
""" result = ( - ensure_dimension_is_the_first_dimension(self.cube, "realization")) + ensure_dimension_is_the_zeroth_dimension(self.cube, "realization")) self.assertArrayAlmostEqual(result.data, self.cube.data) def test_if_probabilistic_dimension_is_not_first(self): @@ -235,7 +235,7 @@ def test_if_probabilistic_dimension_is_not_first(self): cube = self.cube.copy() cube.transpose([3, 2, 1, 0]) result = ( - ensure_dimension_is_the_first_dimension(cube, "realization")) + ensure_dimension_is_the_zeroth_dimension(cube, "realization")) self.assertArrayAlmostEqual(result.data, expected.data) def test_if_probabilistic_dimension_is_scalar(self): @@ -245,7 +245,7 @@ def test_if_probabilistic_dimension_is_scalar(self): """ cube = self.cube[0, :, :, :] result = ( - ensure_dimension_is_the_first_dimension(cube, "realization")) + ensure_dimension_is_the_zeroth_dimension(cube, "realization")) self.assertArrayAlmostEqual(result.data, [cube.data]) def test_if_probabilistic_dimension_not_available(self): @@ -257,7 +257,7 @@ def test_if_probabilistic_dimension_not_available(self): cube.remove_coord("realization") msg = "not a dimension coordinate" with self.assertRaisesRegexp(ValueError, msg): - ensure_dimension_is_the_first_dimension(cube, "realization") + ensure_dimension_is_the_zeroth_dimension(cube, "realization") class Test_concatenate_cubes(IrisTest): diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 4719787011..479d3df903 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -47,10 +47,10 @@ _add_forecast_reference_time_and_forecast_period) -class Test__mismatch_between_length_of_raw_members_and_percentiles(IrisTest): +class Test__recycle_raw_ensemble_members(IrisTest): """ - Test the _mismatch_between_length_of_raw_members_and_percentiles + Test the _recycle_raw_ensemble_members method in the EnsembleReordering plugin. """ @@ -82,7 +82,7 @@ def test_realization_for_equal(self): post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube plu = Plugin() - result = plu._mismatch_between_length_of_raw_members_and_percentiles( + result = plu._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -95,12 +95,13 @@ def test_realization_for_greater_than(self): percentiles is greater than the length of the members, check that the points of the realization coordinate is as expected. 
""" - data = [0, 1, 2] + data = [12, 13, 14] post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] + raw_forecast_members.coord("realization").points = [12, 13] plu = Plugin() - result = plu._mismatch_between_length_of_raw_members_and_percentiles( + result = plu._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -119,7 +120,7 @@ def test_realization_for_less_than(self): post_processed_forecast_percentiles = ( post_processed_forecast_percentiles[:2, :, :, :]) plu = Plugin() - result = plu._mismatch_between_length_of_raw_members_and_percentiles( + result = plu._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members) self.assertIsInstance(result, Cube) self.assertArrayAlmostEqual( @@ -146,7 +147,7 @@ def test_realization_for_equal_check_data(self): post_processed_forecast_percentiles = self.percentile_cube raw_forecast_members = self.realization_cube plu = Plugin() - result = plu._mismatch_between_length_of_raw_members_and_percentiles( + result = plu._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -172,7 +173,7 @@ def test_realization_for_greater_than_check_data(self): # members than percentiles. raw_forecast_members = raw_forecast_members[:2, :, :, :] plu = Plugin() - result = plu._mismatch_between_length_of_raw_members_and_percentiles( + result = plu._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -194,7 +195,7 @@ def test_realization_for_less_than_check_data(self): post_processed_forecast_percentiles = ( post_processed_forecast_percentiles[:2, :, :, :]) plu = Plugin() - result = plu._mismatch_between_length_of_raw_members_and_percentiles( + result = plu._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(data, result.data) @@ -250,7 +251,7 @@ def test_realization_for_greater_than_check_data_lots_of_members(self): raw_forecast_members = self.realization_cube raw_forecast_members = raw_forecast_members[:2, :, :, :] plu = Plugin() - result = plu._mismatch_between_length_of_raw_members_and_percentiles( + result = plu._recycle_raw_ensemble_members( post_processed_forecast_percentiles, raw_forecast_members) self.assertArrayAlmostEqual(expected, result.data) @@ -542,6 +543,8 @@ def setUp(self): set_up_temperature_cube())) self.post_processed_percentiles.coord("realization").rename( "percentile") + self.post_processed_percentiles.coord("percentile").points = ( + [0.1, 0.5, 0.9]) def test_basic(self): """ @@ -552,6 +555,8 @@ def test_basic(self): result = plugin.process(self.post_processed_percentiles, self.raw_cube) self.assertIsInstance(result, Cube) self.assertTrue(result.coords("realization")) + self.assertArrayAlmostEqual( + result.coord("realization").points, [0, 1, 2]) def test_2d_cube_random_ordering(self): """ diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index 2f4582c72e..5b7eb249dd 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ 
b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py
index 2f4582c72e..5b7eb249dd 100644
--- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py
+++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py
@@ -293,9 +293,10 @@ def test_basic(self):
         predictor_and_variance = CubeList(
             [current_forecast_predictor, current_forecast_variance])
 
+        no_of_percentiles = len(raw_forecast.coord("realization").points)
         plugin = Plugin()
-        result = plugin.process(predictor_and_variance, raw_forecast)
+        result = plugin.process(predictor_and_variance, no_of_percentiles)
         self.assertIsInstance(result, Cube)
 
     def test_number_of_percentiles(self):
@@ -313,8 +314,10 @@ def test_number_of_percentiles(self):
         predictor_and_variance = CubeList(
             [current_forecast_predictor, current_forecast_variance])
 
+        no_of_percentiles = len(raw_forecast.coord("realization").points)
+
         plugin = Plugin()
-        result = plugin.process(predictor_and_variance, raw_forecast)
+        result = plugin.process(predictor_and_variance, no_of_percentiles)
         self.assertEqual(len(raw_forecast.coord("realization").points),
                          len(result.coord("percentile").points))
 
diff --git a/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py
index e0499c95ba..89b9d42a0a 100644
--- a/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py
+++ b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py
@@ -63,15 +63,36 @@ def setUp(self):
         self.current_temperature_cube = cube
 
     def test_basic(self):
-        """"""
+        """
+        Test that a cube is produced and the realization coordinate is a
+        dimension coordinate, after the percentile coordinate has been
+        rebadged.
+        """
         cube = self.current_temperature_cube
         plugin = Plugin()
         result = plugin.process(cube)
         self.assertIsInstance(result, Cube)
         self.assertIsInstance(result.coord("realization"), DimCoord)
 
+    def test_specify_member_numbers(self):
+        """
+        Use the ensemble_member_numbers optional argument to specify
+        particular values for the ensemble member numbers.
+        """
+        cube = self.current_temperature_cube
+        plen = len(cube.coord("percentile").points)
+        ensemble_member_numbers = np.arange(plen)+12
+        plugin = Plugin()
+        result = plugin.process(cube, ensemble_member_numbers)
+        self.assertEqual(len(result.coord("realization").points), plen)
+        self.assertArrayAlmostEqual(
+            result.coord("realization").points, np.array([12, 13, 14]))
+
     def test_number_of_members(self):
-        """"""
+        """
+        Check the values for the realization coordinate generated without
+        specifying the ensemble_member_numbers argument.
+        """
         cube = self.current_temperature_cube
         plen = len(cube.coord("percentile").points)
         plugin = Plugin()
@@ -81,7 +102,10 @@ def test_number_of_members(self):
             result.coord("realization").points, np.array([0, 1, 2]))
 
     def test_no_percentile_coord(self):
-        """"""
+        """
+        Check that an exception is raised if the cube does not contain the
+        desired percentile coordinate.
+ """ cube = self.current_temperature_cube cube.coord("percentile").rename("realization") plugin = Plugin() diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index 4473883172..463eabac9a 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -142,10 +142,10 @@ def test_percentiles_not_ascending(self): percentiles, forecast_at_percentiles, bounds_pairing) -class Test__sample_percentiles(IrisTest): +class Test__interpolate_percentiles(IrisTest): """ - Test the _sample_percentiles method of the ResamplePercentiles plugin. + Test the _interpolate_percentiles method of the ResamplePercentiles plugin. """ def setUp(self): @@ -173,7 +173,8 @@ def test_basic(self): percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._sample_percentiles( + print "cube = ", cube + result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) self.assertIsInstance(result, Cube) @@ -187,14 +188,14 @@ def test_transpose_cube_dimensions(self): percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - nontransposed_result = plugin._sample_percentiles( + nontransposed_result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) # Calculate result for transposed cube. # Original cube dimensions are [P, T, Y, X]. # Transposed cube dimensions are [X, Y, T, P]. cube.transpose([3, 2, 1, 0]) - transposed_result = plugin._sample_percentiles( + transposed_result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) # Result cube will be [P, X, Y, T] @@ -225,7 +226,7 @@ def test_simple_check_data(self): percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._sample_percentiles( + result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, expected) @@ -248,7 +249,7 @@ def test_check_data(self): percentiles = [0.2, 0.6, 0.8] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._sample_percentiles( + result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) @@ -286,7 +287,7 @@ def test_check_data_multiple_timesteps(self): percentiles = [0.2, 0.6, 0.8] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._sample_percentiles( + result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, expected) @@ -325,7 +326,7 @@ def test_check_single_threshold(self): percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._sample_percentiles( + result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, expected) @@ -359,7 +360,7 @@ def test_lots_of_input_percentiles(self): percentiles = [0.1, 0.5, 0.9] bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._sample_percentiles( + result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) self.assertArrayAlmostEqual(result.data, data) @@ -404,7 +405,7 @@ def test_lots_of_percentiles(self): percentiles = np.arange(0.05, 1.0, 0.1) bounds_pairing = (-40, 50) plugin = Plugin() - result = plugin._sample_percentiles( + result = plugin._interpolate_percentiles( cube, percentiles, bounds_pairing) 
         self.assertArrayAlmostEqual(result.data, data)
 
@@ -420,7 +420,7 @@ def test_check_data_spot_forecasts(self):
         percentiles = [0.1, 0.5, 0.9]
         bounds_pairing = (-40, 50)
         plugin = Plugin()
-        result = plugin._sample_percentiles(
+        result = plugin._interpolate_percentiles(
             cube, percentiles, bounds_pairing)
         self.assertArrayAlmostEqual(result.data, data)

From 9eb836d4e88be549e2822323db08c1ae7a0dd92d Mon Sep 17 00:00:00 2001
From: Gavin Evans
Date: Thu, 1 Jun 2017 16:06:01 +0100
Subject: [PATCH 0131/1367] Edits to improve the metadata model for a cube
 with probabilities and thresholds associated with those probabilities.

---
 .../ensemble_copula_coupling.py               | 44 ++++++++---------
 .../ensemble_copula_coupling_constants.py     |  7 +--
 .../ensemble_copula_coupling_utilities.py     | 31 +++++++++++-
 .../helper_functions_ensemble_calibration.py  | 18 ++++---
 ...oupling_EnsembleCopulaCouplingUtilities.py | 48 +++++++++++++++++--
 ...ng_GeneratePercentilesFromProbabilities.py | 14 +++---
 ...ble_copula_coupling_ResamplePercentiles.py |  2 +-
 7 files changed, 119 insertions(+), 45 deletions(-)

diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
index 52ea6fabe8..52c8907b32 100644
--- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
+++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling.py
@@ -45,7 +45,7 @@
 from improver.ensemble_copula_coupling.ensemble_copula_coupling_utilities \
     import (concatenate_2d_array_with_2d_array_endpoints,
             create_cube_with_percentiles, choose_set_of_percentiles,
-            get_bounds_of_distribution,
+            find_coordinate, get_bounds_of_distribution,
             insert_lower_and_upper_endpoint_to_1d_array,
             reshape_array_to_have_probabilistic_dimension_at_the_front)
 
@@ -277,9 +277,9 @@ class GeneratePercentilesFromProbabilities(object):
     In combination with the Ensemble Reordering plugin, this is a variant
     Ensemble Copula Coupling.
 
-    This class includes the ability to interpolate between probability
-    thresholds in order to generate the percentiles, see Figure 1 from
-    Flowerdew, 2014.
+    This class includes the ability to interpolate between probabilities
+    specified using multiple thresholds in order to generate the percentiles,
+    see Figure 1 from Flowerdew, 2014.
 
     Scientific Reference:
     Flowerdew, J., 2014.
@@ -348,7 +348,7 @@ def _probabilities_to_percentiles(
         Parameters
         ----------
         forecast_probabilities : Iris cube
-            Cube with a probability_above_threshold coordinate.
+            Cube with a threshold coordinate.
         percentiles : Numpy array
             Array of percentiles, at which the corresponding values will be
             calculated.
@@ -363,16 +363,16 @@ def _probabilities_to_percentiles(
             air_temperature at the required percentiles.
 
         """
-        threshold_points = (
-            forecast_probabilities.coord("probability_above_threshold").points)
+        threshold_coord = find_coordinate(forecast_probabilities, "threshold")
+        threshold_points = threshold_coord.points
 
         # Ensure that the percentile dimension is first, so that the
         # conversion to a 2d array produces data in the desired order.
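# --- Illustrative sketch (editorial aside, not part of the patch) ---
# The core of _probabilities_to_percentiles for one location: exceedance
# probabilities are inverted to cumulative probabilities, the CDF is padded
# with the climatological bounds, and threshold values are interpolated at
# the requested percentiles. Names here are hypothetical; the real plugin
# also checks that the cumulative probabilities increase monotonically.
import numpy as np

def probabilities_to_percentiles_1d(
        threshold_points, probabilities_above, percentiles, bounds_pairing):
    cdf = 1.0 - np.asarray(probabilities_above)  # P(value <= threshold)
    low, high = bounds_pairing
    xp = np.concatenate([[0.0], cdf, [1.0]])
    fp = np.concatenate([[float(low)], threshold_points, [float(high)]])
    return np.interp(percentiles, xp, fp)

# Thresholds of 8/10/12 degC with exceedance probabilities 0.9/0.5/0.1
# yield 8.0, 10.0 and 12.0 degC at percentiles 0.1, 0.5 and 0.9:
values = probabilities_to_percentiles_1d(
    np.array([8.0, 10.0, 12.0]), np.array([0.9, 0.5, 0.1]),
    np.array([0.1, 0.5, 0.9]), (-40, 50))
# --- End of sketch ---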
forecast_probabilities = ( ensure_dimension_is_the_zeroth_dimension( - forecast_probabilities, "probability_above_threshold")) + forecast_probabilities, threshold_coord.name())) prob_slices = convert_cube_data_to_2d( - forecast_probabilities, coord="probability_above_threshold") + forecast_probabilities, coord=threshold_coord.name()) # Invert probabilities probabilities_for_cdf = 1 - prob_slices @@ -401,11 +401,11 @@ def _probabilities_to_percentiles( forecast_at_percentiles = ( reshape_array_to_have_probabilistic_dimension_at_the_front( forecast_at_percentiles, forecast_probabilities, - "probability_above_threshold", len(percentiles))) + threshold_coord.name(), len(percentiles))) for template_cube in forecast_probabilities.slices_over( - "probability_above_threshold"): - template_cube.remove_coord("probability_above_threshold") + threshold_coord.name()): + template_cube.remove_coord(threshold_coord.name()) break percentile_cube = create_cube_with_percentiles( percentiles, template_cube, forecast_at_percentiles) @@ -414,19 +414,18 @@ def _probabilities_to_percentiles( def process(self, forecast_probabilities, no_of_percentiles=None, sampling="quantile"): """ - 1. Concatenates cubes with a probability_above_threshold coordinate. + 1. Concatenates cubes with a threshold coordinate. 2. Creates a list of percentiles. 3. Accesses the lower and upper bound pair to find the ends of the cumulative distribution function. - 4. Convert the probability_above_threshold coordinate into + 4. Convert the threshold coordinate into values at a set of percentiles using linear interpolation, see Figure 1 from Flowerdew, 2014. Parameters ---------- forecast_probabilities : Iris CubeList or Iris Cube - Cube or CubeList expected to contain a probability_above_threshold - coordinate. + Cube or CubeList expected to contain a threshold coordinate. no_of_percentiles : Integer or None Number of percentiles If None, the number of thresholds within the input @@ -448,20 +447,21 @@ def process(self, forecast_probabilities, no_of_percentiles=None, """ forecast_probabilities = concatenate_cubes(forecast_probabilities) + threshold_coord = find_coordinate(forecast_probabilities, "threshold") if no_of_percentiles is None: no_of_percentiles = ( len(forecast_probabilities.coord( - "probability_above_threshold").points)) + threshold_coord.name()).points)) percentiles = choose_set_of_percentiles( no_of_percentiles, sampling=sampling) cube_units = ( - forecast_probabilities.coord("probability_above_threshold").units) + forecast_probabilities.coord(threshold_coord.name()).units) bounds_pairing = ( get_bounds_of_distribution( - forecast_probabilities.name(), cube_units)) + threshold_coord.name(), cube_units)) forecast_at_percentiles = self._probabilities_to_percentiles( forecast_probabilities, percentiles, bounds_pairing) @@ -757,9 +757,9 @@ def process( Returns ------- post-processed_forecast_members : cube - Cube for a new ensemble member where all points within the dataset - are representative of a specified probability threshold across the - whole domain. + Cube containing the new ensemble members where all points within + the dataset have been reordered in comparison to the input + percentiles. 
""" post_processed_forecast_percentiles = concatenate_cubes( post_processed_forecast, diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py index 8c59209c1f..2ee49cf3b7 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_constants.py @@ -48,6 +48,7 @@ # Tellus Series A, Dynamic Meteorology and Oceanography, 66, 22662. bounds_for_ecdf = { - "air_temperature": bounds((-40+ABSOLUTE_ZERO, 50+ABSOLUTE_ZERO), "Kelvin"), - "wind_speed": bounds((0, 50), "m s^-1"), - "air_pressure_at_sea_level": bounds((94000, 107000), "Pa")} + "air_temperature_threshold": ( + bounds((-40+ABSOLUTE_ZERO, 50+ABSOLUTE_ZERO), "Kelvin")), + "wind_speed_threshold": bounds((0, 50), "m s^-1"), + "air_pressure_at_sea_level_threshold": bounds((94000, 107000), "Pa")} diff --git a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py index 7fb19413ba..6ed956b197 100644 --- a/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py +++ b/lib/improver/ensemble_copula_coupling/ensemble_copula_coupling_utilities.py @@ -69,9 +69,9 @@ def concatenate_2d_array_with_2d_array_endpoints( high_endpoint. """ lower_array = ( - np.full((array_2d.shape[0], 1), low_endpoint)) + np.full((array_2d.shape[0], 1), low_endpoint, dtype=array_2d.dtype)) upper_array = ( - np.full((array_2d.shape[0], 1), high_endpoint)) + np.full((array_2d.shape[0], 1), high_endpoint, dtype=array_2d.dtype)) array_2d = np.concatenate( (lower_array, array_2d, upper_array), axis=1) return array_2d @@ -190,6 +190,33 @@ def create_cube_with_percentiles(percentiles, template_cube, cube_data): return result +def find_coordinate(cube, name_of_desired_coord): + """ + Find whether the requested coordinate name is within the + coordinates available on the cube. The matching will + work for either a full name match e.g. air_temperature == air_temperature, + or if the desired coord is a substring of the coordinate_name e.g. + 'threshold' will match 'air_temperature_threshold'. + + Parameters + ---------- + cube : Iris.cube.Cube + Cube to search for the desired coordinate. + name_of_desired_coord : String + Name or partial name of the coordinate to search for. + + """ + for coord in cube.coords(): + if name_of_desired_coord in coord.name(): + break + else: + msg = ("The coordinate of name: {} was not found " + "within {}".format( + name_of_desired_coord, cube.coords)) + raise CoordinateNotFoundError(msg) + return coord + + def get_bounds_of_distribution(bounds_pairing_key, desired_units): """ Gets the bounds of the distribution and converts the units of the diff --git a/lib/improver/tests/helper_functions_ensemble_calibration.py b/lib/improver/tests/helper_functions_ensemble_calibration.py index 8562fd9353..bb96652d8e 100644 --- a/lib/improver/tests/helper_functions_ensemble_calibration.py +++ b/lib/improver/tests/helper_functions_ensemble_calibration.py @@ -51,11 +51,14 @@ def set_up_probability_above_threshold_cube( Create a cube containing multiple probability_above_threshold values for the coordinate. 
""" - cube = Cube(data, standard_name=phenomenon_standard_name, + cube_long_name = ( + "probability_of_{}_above_threshold".format(phenomenon_standard_name)) + cube = Cube(data, long_name=cube_long_name, units=phenomenon_units) + coord_long_name = "{}_threshold".format(phenomenon_standard_name) cube.add_dim_coord( - DimCoord(forecast_thresholds, - long_name='probability_above_threshold', units='degreesC'), 0) + DimCoord(forecast_thresholds, long_name=coord_long_name, + units='degreesC'), 0) time_origin = "hours since 1970-01-01 00:00:00" calendar = "gregorian" tunit = Unit(time_origin, calendar) @@ -93,11 +96,14 @@ def set_up_probability_above_threshold_spot_cube( Create a cube containing multiple realizations, where one of the dimensions is an index used for spot forecasts. """ - cube = Cube(data, standard_name=phenomenon_standard_name, + cube_long_name = ( + "probability_of_{}_above_threshold".format(phenomenon_standard_name)) + cube = Cube(data, long_name=cube_long_name, units=phenomenon_units) + coord_long_name = "{}_threshold".format(phenomenon_standard_name) cube.add_dim_coord( - DimCoord(forecast_thresholds, - long_name='probability_above_threshold', units='degreesC'), 0) + DimCoord(forecast_thresholds, long_name=coord_long_name, + units='degreesC'), 0) time_origin = "hours since 1970-01-01 00:00:00" calendar = "gregorian" tunit = Unit(time_origin, calendar) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 86336ed4f2..99d0a5f710 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -45,7 +45,7 @@ import (choose_set_of_percentiles, create_cube_with_percentiles, insert_lower_and_upper_endpoint_to_1d_array, concatenate_2d_array_with_2d_array_endpoints, - get_bounds_of_distribution, + find_coordinate, get_bounds_of_distribution, reshape_array_to_have_probabilistic_dimension_at_the_front) from improver.tests.helper_functions_ensemble_calibration import ( set_up_cube, @@ -309,6 +309,46 @@ def test_unknown_sampling_option(self): choose_set_of_percentiles(no_of_percentiles, sampling="unknown") +class Test_find_coordinate(IrisTest): + + """Test the find_coordinate function.""" + + def setUp(self): + self.current_temperature_forecast_cube = ( + _add_forecast_reference_time_and_forecast_period( + set_up_probability_above_threshold_temperature_cube())) + + def test_full_match(self): + """ + Test that the returned value is a dimension coordinate with the + expected name where the full name of the coordinate is provided. + """ + cube = self.current_temperature_forecast_cube + result = find_coordinate(cube, "air_temperature_threshold") + self.assertIsInstance(result, DimCoord) + self.assertIn("air_temperature_threshold", result.name()) + + def test_partial_match(self): + """ + Test that the returned value is a dimension coordinate with the + expected name where a partial name for the coordinate is provided. + """ + cube = self.current_temperature_forecast_cube + result = find_coordinate(cube, "threshold") + self.assertIsInstance(result, DimCoord) + self.assertIn("threshold", result.name()) + + def test_exception_raised(self): + """ + Test that an exception is raised, if the desired coordinate is not + within the cube being searched. 
+ """ + cube = self.current_temperature_forecast_cube + msg = "The coordinate of name" + with self.assertRaisesRegexp(CoordinateNotFoundError, msg): + find_coordinate(cube, "nonsense") + + class Test_get_bounds_of_distribution(IrisTest): """Test the get_bounds_of_distribution plugin.""" @@ -320,7 +360,7 @@ def setUp(self): def test_basic(self): """Test that the result is a numpy array.""" - cube_name = "air_temperature" + cube_name = "air_temperature_threshold" cube_units = Unit("degreesC") result = get_bounds_of_distribution(cube_name, cube_units) self.assertIsInstance(result, np.ndarray) @@ -329,7 +369,7 @@ def test_check_data(self): """ Test that the expected results are returned for the bounds_pairing. """ - cube_name = "air_temperature" + cube_name = "air_temperature_threshold" cube_units = Unit("degreesC") bounds_pairing = (-40, 50) result = ( @@ -342,7 +382,7 @@ def test_check_unit_conversion(self): if the units of the bounds_pairings need to be converted to match the units of the forecast. """ - cube_name = "air_temperature" + cube_name = "air_temperature_threshold" cube_units = Unit("fahrenheit") bounds_pairing = (-40, 122) # In fahrenheit result = ( diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 9a782e7aab..65db9d07a7 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -63,7 +63,7 @@ def setUp(self): def test_basic(self): """Test that the plugin returns two numpy arrays.""" cube = self.current_temperature_forecast_cube - threshold_points = cube.coord("probability_above_threshold").points + threshold_points = cube.coord("air_temperature_threshold").points probabilities_for_cdf = cube.data.reshape(3, 9) bounds_pairing = (-40, 50) plugin = Plugin() @@ -79,7 +79,7 @@ def test_bounds_of_threshold_points(self): the bounds_pairing. """ cube = self.current_temperature_forecast_cube - threshold_points = cube.coord("probability_above_threshold").points + threshold_points = cube.coord("air_temperature_threshold").points probabilities_for_cdf = cube.data.reshape(3, 9) bounds_pairing = (-40, 50) plugin = Plugin() @@ -95,7 +95,7 @@ def test_probability_data(self): represent the extreme ends of the Cumulative Distribution Function. """ cube = self.current_temperature_forecast_cube - threshold_points = cube.coord("probability_above_threshold").points + threshold_points = cube.coord("air_temperature_threshold").points probabilities_for_cdf = cube.data.reshape(3, 9) zero_array = np.zeros(probabilities_for_cdf[:, 0].shape) one_array = np.ones(probabilities_for_cdf[:, 0].shape) @@ -270,10 +270,10 @@ def test_probabilities_not_monotonically_increasing(self): plugin._probabilities_to_percentiles( cube, percentiles, bounds_pairing) - def test_result_cube_has_no_probability_above_threshold_coordinate(self): + def test_result_cube_has_no_air_temperature_threshold_coordinate(self): """ Test that the plugin returns a cube with coordinates that - do not include the probability_above_threshold coordinate. + do not include the air_temperature_threshold coordinate. 
""" cube = self.current_temperature_forecast_cube percentiles = [0.1, 0.5, 0.9] @@ -282,7 +282,7 @@ def test_result_cube_has_no_probability_above_threshold_coordinate(self): result = plugin._probabilities_to_percentiles( cube, percentiles, bounds_pairing) for coord in result.coords(): - self.assertNotEqual(coord.name(), "probability_above_threshold") + self.assertNotEqual(coord.name(), "air_temperature_threshold") def test_check_data(self): """ @@ -324,7 +324,7 @@ def test_check_single_threshold(self): [41.6, 29., 3.2]]]]) for acube in self.current_temperature_forecast_cube.slices_over( - "probability_above_threshold"): + "air_temperature_threshold"): cube = acube break percentiles = [0.1, 0.5, 0.9] diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index 463eabac9a..1589472211 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -435,7 +435,7 @@ def setUp(self): data[0] -= 1 data[1] += 1 data[2] += 3 - cube = set_up_cube(data, "air_temperature", "degreesC") + cube = set_up_cube(data, "air_temperature_threshold", "degreesC") cube.coord("realization").rename("percentile") cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) self.percentile_cube = ( From 9120b76df8ffd0335bc32667bdaa305938de0246 Mon Sep 17 00:00:00 2001 From: Gavin Evans Date: Mon, 5 Jun 2017 16:33:17 +0100 Subject: [PATCH 0132/1367] Corrections following rebase. --- ...oupling_EnsembleCopulaCouplingUtilities.py | 17 +++++++------- ...mble_copula_coupling_EnsembleReordering.py | 12 +++++----- ..._GeneratePercentilesFromMeanAndVariance.py | 6 ++--- ...ng_GeneratePercentilesFromProbabilities.py | 21 +++++++++-------- ...la_coupling_RebadgePercentilesAsMembers.py | 4 ++-- ...ble_copula_coupling_ResamplePercentiles.py | 23 ++++++++++--------- 6 files changed, 43 insertions(+), 40 deletions(-) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py index 99d0a5f710..88f5f6da6e 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleCopulaCouplingUtilities.py @@ -50,7 +50,7 @@ from improver.tests.helper_functions_ensemble_calibration import ( set_up_cube, set_up_temperature_cube, set_up_spot_temperature_cube, - _add_forecast_reference_time_and_forecast_period, + add_forecast_reference_time_and_forecast_period, set_up_probability_above_threshold_temperature_cube) @@ -110,13 +110,13 @@ class Test_create_cube_with_percentiles(IrisTest): def setUp(self): """Set up temperature cube.""" current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.cube_data = current_temperature_forecast_cube.data current_temperature_spot_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_spot_temperature_cube())) self.cube_spot_data = ( current_temperature_spot_forecast_cube.data) @@ -315,7 +315,7 @@ class Test_find_coordinate(IrisTest): def setUp(self): self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( 
set_up_probability_above_threshold_temperature_cube())) def test_full_match(self): @@ -355,7 +355,7 @@ class Test_get_bounds_of_distribution(IrisTest): def setUp(self): self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_temperature_cube())) def test_basic(self): @@ -447,7 +447,7 @@ class Test_reshape_array_to_have_probabilistic_dimension_at_the_front( def setUp(self): """Set up temperature cube.""" cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) percentile_points = np.arange(len(cube.coord("realization").points)) cube.coord("realization").points = percentile_points @@ -537,8 +537,9 @@ def test_percentile_is_dimension_coordinate_multiple_timesteps(self): cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) plen = 1 percentile_cube = ( - _add_forecast_reference_time_and_forecast_period( - cube, time_point=np.array([402295.0, 402296.0]))) + add_forecast_reference_time_and_forecast_period( + cube, time_point=np.array([402295.0, 402296.0]), + fp_point=[2.0, 3.0])) reshaped_array = ( reshape_array_to_have_probabilistic_dimension_at_the_front( percentile_cube[0].data, percentile_cube, "percentile", plen)) diff --git a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py index 479d3df903..54913769d6 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_EnsembleReordering.py @@ -44,7 +44,7 @@ EnsembleReordering as Plugin) from improver.tests.helper_functions_ensemble_calibration import( set_up_cube, set_up_temperature_cube, - _add_forecast_reference_time_and_forecast_period) + add_forecast_reference_time_and_forecast_period) class Test__recycle_raw_ensemble_members(IrisTest): @@ -66,10 +66,10 @@ def setUp(self): data[2] += 3 cube = set_up_cube(data, "air_temperature", "degreesC") self.realization_cube = ( - _add_forecast_reference_time_and_forecast_period(cube.copy())) + add_forecast_reference_time_and_forecast_period(cube.copy())) cube.coord("realization").rename("percentile") self.percentile_cube = ( - _add_forecast_reference_time_and_forecast_period(cube)) + add_forecast_reference_time_and_forecast_period(cube)) def test_realization_for_equal(self): """ @@ -215,10 +215,10 @@ def test_realization_for_greater_than_check_data_lots_of_members(self): realizations=np.arange(0, 9)) self.realization_cube = ( - _add_forecast_reference_time_and_forecast_period(cube.copy())) + add_forecast_reference_time_and_forecast_period(cube.copy())) cube.coord("realization").rename("percentile") self.percentile_cube = ( - _add_forecast_reference_time_and_forecast_period(cube)) + add_forecast_reference_time_and_forecast_period(cube)) expected = np.array([[[[4., 4.625, 5.25], [5.875, 6.5, 7.125], @@ -539,7 +539,7 @@ def setUp(self): add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.post_processed_percentiles = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.post_processed_percentiles.coord("realization").rename( "percentile") diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py 
b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py index 5b7eb249dd..ec0f28ea11 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromMeanAndVariance.py @@ -44,7 +44,7 @@ GeneratePercentilesFromMeanAndVariance as Plugin) from improver.tests.helper_functions_ensemble_calibration import( set_up_spot_temperature_cube, set_up_temperature_cube, - _add_forecast_reference_time_and_forecast_period) + add_forecast_reference_time_and_forecast_period) class Test__mean_and_variance_to_percentiles(IrisTest): @@ -57,7 +57,7 @@ def setUp(self): add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) self.current_temperature_spot_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_spot_temperature_cube())) def test_check_data(self): @@ -279,7 +279,7 @@ class Test_process(IrisTest): def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) def test_basic(self): diff --git a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py index 65db9d07a7..9b8beeff3e 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_GeneratePercentilesFromProbabilities.py @@ -42,7 +42,7 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( GeneratePercentilesFromProbabilities as Plugin) from improver.tests.helper_functions_ensemble_calibration import( - _add_forecast_reference_time_and_forecast_period, + add_forecast_reference_time_and_forecast_period, set_up_probability_above_threshold_cube, set_up_probability_above_threshold_temperature_cube, set_up_probability_above_threshold_spot_temperature_cube) @@ -57,7 +57,7 @@ class Test__add_bounds_to_thresholds_and_probabilities(IrisTest): def setUp(self): self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_temperature_cube())) def test_basic(self): @@ -132,10 +132,10 @@ class Test__probabilities_to_percentiles(IrisTest): def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_temperature_cube())) self.current_temperature_spot_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_spot_temperature_cube())) def test_basic(self): @@ -188,7 +188,7 @@ def test_simple_check_data(self): data = data[:, np.newaxis, np.newaxis, np.newaxis] self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_cube( data, "air_temperature", "1", forecast_thresholds=[8, 10, 12], y_dimension_length=1, @@ -236,8 +236,9 @@ def test_check_data_multiple_timesteps(self): data, "air_temperature", "degreesC", timesteps=2, 
x_dimension_length=2, y_dimension_length=2) self.probability_cube = ( - _add_forecast_reference_time_and_forecast_period( - cube, time_point=np.array([402295.0, 402296.0]))) + add_forecast_reference_time_and_forecast_period( + cube, time_point=np.array([402295.0, 402296.0]), + fp_point=[2.0, 3.0])) cube = self.probability_cube percentiles = [0.2, 0.6, 0.8] bounds_pairing = (-40, 50) @@ -256,7 +257,7 @@ def test_probabilities_not_monotonically_increasing(self): data = data[:, np.newaxis, np.newaxis, np.newaxis] self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_cube( data, "air_temperature", "1", forecast_thresholds=[8, 10, 12], y_dimension_length=1, @@ -354,7 +355,7 @@ def test_lots_of_probability_thresholds(self): temperature_values = np.arange(0, 30) cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_cube( input_probs, "air_temperature", "1", forecast_thresholds=temperature_values))) @@ -444,7 +445,7 @@ class Test_process(IrisTest): def setUp(self): """Set up temperature cube.""" self.current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_probability_above_threshold_temperature_cube())) def test_check_data_specifying_percentiles(self): diff --git a/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py index 89b9d42a0a..d265920f90 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_RebadgePercentilesAsMembers.py @@ -44,7 +44,7 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( RebadgePercentilesAsMembers as Plugin) from improver.tests.helper_functions_ensemble_calibration import ( - set_up_temperature_cube, _add_forecast_reference_time_and_forecast_period) + set_up_temperature_cube, add_forecast_reference_time_and_forecast_period) class Test_process(IrisTest): @@ -55,7 +55,7 @@ class Test_process(IrisTest): def setUp(self): cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_temperature_cube())) percentile_points = np.arange(len(cube.coord("realization").points)) cube.coord("realization").points = percentile_points diff --git a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py index 1589472211..cdcb54fd32 100644 --- a/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py +++ b/lib/improver/tests/test_ensemble_copula_coupling_ResamplePercentiles.py @@ -40,7 +40,7 @@ from improver.ensemble_copula_coupling.ensemble_copula_coupling import ( ResamplePercentiles as Plugin) from improver.tests.helper_functions_ensemble_calibration import( - _add_forecast_reference_time_and_forecast_period, + add_forecast_reference_time_and_forecast_period, set_up_cube, set_up_spot_temperature_cube) @@ -58,11 +58,11 @@ def setUp(self): data[2] += 3 cube = set_up_cube(data, "air_temperature", "degreesC") self.realization_cube = ( - _add_forecast_reference_time_and_forecast_period(cube.copy())) + add_forecast_reference_time_and_forecast_period(cube.copy())) 
cube.coord("realization").rename("percentile") cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) self.percentile_cube = ( - _add_forecast_reference_time_and_forecast_period(cube)) + add_forecast_reference_time_and_forecast_period(cube)) def test_basic(self): """Test that the plugin returns two numpy arrays.""" @@ -157,9 +157,9 @@ def setUp(self): cube.coord("realization").rename("percentile") cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) self.percentile_cube = ( - _add_forecast_reference_time_and_forecast_period(cube)) + add_forecast_reference_time_and_forecast_period(cube)) spot_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_spot_temperature_cube())) spot_cube.convert_units("degreesC") spot_cube.coord("realization").rename("percentile") @@ -216,7 +216,7 @@ def test_simple_check_data(self): data = data[:, np.newaxis, np.newaxis, np.newaxis] current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_cube( data, "air_temperature", "1", y_dimension_length=1, x_dimension_length=1))) @@ -281,8 +281,9 @@ def test_check_data_multiple_timesteps(self): cube.coord("realization").rename("percentile") cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) self.percentile_cube = ( - _add_forecast_reference_time_and_forecast_period( - cube, time_point=np.array([402295.0, 402296.0]))) + add_forecast_reference_time_and_forecast_period( + cube, time_point=np.array([402295.0, 402296.0]), + fp_point=[2.0, 3.0])) cube = self.percentile_cube percentiles = [0.2, 0.6, 0.8] bounds_pairing = (-40, 50) @@ -311,7 +312,7 @@ def test_check_single_threshold(self): data = data[:, np.newaxis, np.newaxis, np.newaxis] current_temperature_forecast_cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_cube( data, "air_temperature", "1", realizations=[0], @@ -352,7 +353,7 @@ def test_lots_of_input_percentiles(self): percentiles_values = np.linspace(0, 1, 30) cube = ( - _add_forecast_reference_time_and_forecast_period( + add_forecast_reference_time_and_forecast_period( set_up_cube(input_forecast_values, "air_temperature", "1", realizations=np.arange(30)))) cube.coord("realization").rename("percentile") @@ -439,7 +440,7 @@ def setUp(self): cube.coord("realization").rename("percentile") cube.coord("percentile").points = np.array([0.1, 0.5, 0.9]) self.percentile_cube = ( - _add_forecast_reference_time_and_forecast_period(cube)) + add_forecast_reference_time_and_forecast_period(cube)) def test_check_data_specifying_percentiles(self): """ From 7ae390077dd2f07dbd1bc3ed3e38192cee676e33 Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Mon, 5 Jun 2017 17:42:37 +0100 Subject: [PATCH 0133/1367] Actually updated Acknowledgements --- ACKNOWLEDGEMENTS.md | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/ACKNOWLEDGEMENTS.md b/ACKNOWLEDGEMENTS.md index bfe9de717c..9559cb604c 100644 --- a/ACKNOWLEDGEMENTS.md +++ b/ACKNOWLEDGEMENTS.md @@ -8,6 +8,9 @@ Iris (https://github.com/SciTools/iris), LGPL: BATS (https://github.com/sstephenson/bats), MIT-style: - tests/bin/bats\*, unaltered, from commit 0360811 -Automated generation of API documentation from sphinx-build: - - Based on solution posted by BowenFu: +Sphinx (http://www.sphinx-doc.org), BSD: + - doc/Makefile, doc/source/conf.py and doc/source/index.rst derived + from defaults generated by `sphinx-quickstart`. 
+ - Generation of API documentation during build uses + `sphinx-apidoc` and is based on solution posted by BowenFu: https://github.com/rtfd/readthedocs.org/issues/1139 From bbb6b06e82b82aa8cd2e7a358ec8929a0a33e0fb Mon Sep 17 00:00:00 2001 From: Aaron Hopkinson Date: Tue, 6 Jun 2017 09:51:02 +0100 Subject: [PATCH 0134/1367] Reduced sphinx build output in Travis --- bin/improver-tests | 2 +- doc/build/doctrees/environment.pickle | Bin 0 -> 1395688 bytes doc/build/doctrees/improver.doctree | Bin 0 -> 24516 bytes .../improver.ensemble_calibration.doctree | Bin 0 -> 155713 bytes doc/build/doctrees/improver.grids.doctree | Bin 0 -> 4315 bytes doc/build/doctrees/index.doctree | Bin 0 -> 5323 bytes doc/build/doctrees/modules.doctree | Bin 0 -> 2730 bytes doc/build/html/.buildinfo | 4 + .../ensemble_calibration.html | 1509 +++ .../ensemble_calibration_utilities.html | 397 + doc/build/html/_modules/improver/nbhood.html | 252 + .../html/_modules/improver/threshold.html | 196 + doc/build/html/_modules/index.html | 92 + .../improver.ensemble_calibration.txt | 30 + doc/build/html/_sources/improver.grids.txt | 22 + doc/build/html/_sources/improver.txt | 38 + doc/build/html/_sources/index.txt | 18 + doc/build/html/_sources/modules.txt | 7 + doc/build/html/_static/ajax-loader.gif | Bin 0 -> 673 bytes doc/build/html/_static/basic.css | 611 + doc/build/html/_static/classic.css | 261 + doc/build/html/_static/comment-bright.png | Bin 0 -> 3500 bytes doc/build/html/_static/comment-close.png | Bin 0 -> 3578 bytes doc/build/html/_static/comment.png | Bin 0 -> 3445 bytes doc/build/html/_static/default.css | 1 + doc/build/html/_static/doctools.js | 287 + doc/build/html/_static/down-pressed.png | Bin 0 -> 347 bytes doc/build/html/_static/down.png | Bin 0 -> 347 bytes doc/build/html/_static/file.png | Bin 0 -> 358 bytes doc/build/html/_static/jquery-1.11.1.js | 10308 ++++++++++++++++ doc/build/html/_static/jquery.js | 4 + doc/build/html/_static/minus.png | Bin 0 -> 173 bytes doc/build/html/_static/plus.png | Bin 0 -> 173 bytes doc/build/html/_static/pygments.css | 65 + doc/build/html/_static/searchtools.js | 651 + doc/build/html/_static/sidebar.js | 159 + doc/build/html/_static/underscore-1.3.1.js | 999 ++ doc/build/html/_static/underscore.js | 31 + doc/build/html/_static/up-pressed.png | Bin 0 -> 345 bytes doc/build/html/_static/up.png | Bin 0 -> 345 bytes doc/build/html/_static/websupport.js | 808 ++ doc/build/html/genindex.html | 346 + .../html/improver.ensemble_calibration.html | 640 + doc/build/html/improver.grids.html | 117 + doc/build/html/improver.html | 210 + doc/build/html/index.html | 112 + doc/build/html/modules.html | 124 + doc/build/html/objects.inv | 9 + doc/build/html/py-modindex.html | 143 + doc/build/html/search.html | 107 + doc/build/html/searchindex.js | 1 + 51 files changed, 18560 insertions(+), 1 deletion(-) create mode 100644 doc/build/doctrees/environment.pickle create mode 100644 doc/build/doctrees/improver.doctree create mode 100644 doc/build/doctrees/improver.ensemble_calibration.doctree create mode 100644 doc/build/doctrees/improver.grids.doctree create mode 100644 doc/build/doctrees/index.doctree create mode 100644 doc/build/doctrees/modules.doctree create mode 100644 doc/build/html/.buildinfo create mode 100644 doc/build/html/_modules/improver/ensemble_calibration/ensemble_calibration.html create mode 100644 doc/build/html/_modules/improver/ensemble_calibration/ensemble_calibration_utilities.html create mode 100644 doc/build/html/_modules/improver/nbhood.html create mode 100644 
doc/build/html/_modules/improver/threshold.html create mode 100644 doc/build/html/_modules/index.html create mode 100644 doc/build/html/_sources/improver.ensemble_calibration.txt create mode 100644 doc/build/html/_sources/improver.grids.txt create mode 100644 doc/build/html/_sources/improver.txt create mode 100644 doc/build/html/_sources/index.txt create mode 100644 doc/build/html/_sources/modules.txt create mode 100644 doc/build/html/_static/ajax-loader.gif create mode 100644 doc/build/html/_static/basic.css create mode 100644 doc/build/html/_static/classic.css create mode 100644 doc/build/html/_static/comment-bright.png create mode 100644 doc/build/html/_static/comment-close.png create mode 100644 doc/build/html/_static/comment.png create mode 100644 doc/build/html/_static/default.css create mode 100644 doc/build/html/_static/doctools.js create mode 100644 doc/build/html/_static/down-pressed.png create mode 100644 doc/build/html/_static/down.png create mode 100644 doc/build/html/_static/file.png create mode 100644 doc/build/html/_static/jquery-1.11.1.js create mode 100644 doc/build/html/_static/jquery.js create mode 100644 doc/build/html/_static/minus.png create mode 100644 doc/build/html/_static/plus.png create mode 100644 doc/build/html/_static/pygments.css create mode 100644 doc/build/html/_static/searchtools.js create mode 100644 doc/build/html/_static/sidebar.js create mode 100644 doc/build/html/_static/underscore-1.3.1.js create mode 100644 doc/build/html/_static/underscore.js create mode 100644 doc/build/html/_static/up-pressed.png create mode 100644 doc/build/html/_static/up.png create mode 100644 doc/build/html/_static/websupport.js create mode 100644 doc/build/html/genindex.html create mode 100644 doc/build/html/improver.ensemble_calibration.html create mode 100644 doc/build/html/improver.grids.html create mode 100644 doc/build/html/improver.html create mode 100644 doc/build/html/index.html create mode 100644 doc/build/html/modules.html create mode 100644 doc/build/html/objects.inv create mode 100644 doc/build/html/py-modindex.html create mode 100644 doc/build/html/search.html create mode 100644 doc/build/html/searchindex.js diff --git a/bin/improver-tests b/bin/improver-tests index 014f83b6e7..038e0de179 100755 --- a/bin/improver-tests +++ b/bin/improver-tests @@ -47,7 +47,7 @@ echo_ok "pylint -E" # Build documentation as test. 
 cd $IMPROVER_DIR/doc
-make html
+make html 1>/dev/null
 echo_ok "sphinx-build -b html"
 cd -
 
diff --git a/doc/build/doctrees/environment.pickle b/doc/build/doctrees/environment.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..f58d4a2e388b0cda67520f63705a53b567665694
GIT binary patch
literal 1395688
[base85-encoded binary patch data for doc/build/doctrees/environment.pickle omitted]
zq$l_j3)d%sgs6niJ}tT0jC-$*vyBZy}K(Ygy= zTc$zzYz1vK)T_gsNIatV9Pqdc>B@Ms7{53ATqRnpO-d})A}Doy9+)jBW@F)2bYk}T zS^!HRlTrZrUS*oPl;#B}O-PRUqkH}w;0qOPNlcb9&jCVANG}5ERO&npTxeHpex%l< z1>iz?u}qiQXWWYwJZR`<3MI;=SQb14C+)gYhL@lWJbC=(WmlVX#l?Z;g#~{Z`lU*@ zxYXp{c#57-Uj`~hbrfMysuO1&UammP7;E=1t%5>)1&Exs<1EI~imS}cH{B~0XbYO+ zdkhIBA-ziKHnyVX$Qxq!YDH}|bY%xil(6fs0gnR&ziArYO6Xl!py#Jvt4PcGHf8TL zt|m2nT~tGVe;>`j+itH{oFxa^jK85HuknbMz}^5DPm+)q;s2p%mA)s+{@T4!xt2+i zksE2(l`_03s<7NwrLRokGvRMmu%&%7FRZef;NAjUn3LhLOO6dVdHz;q<_yEAJ$asd z);saA*V{nmS%MThHpZCwc4b=X+0IO;Nj={I+MTElV=L{)TVpjwF{;JIvU{hNVs}?H zH&LW???UN#*Nj(nXpRLe4mkaZ0?yD7c()eHl7MVYTXm9_lDr2c;qgF+$?3BQ_#k7m zhGXT}3x2N_z_MSN;hI`ih5bITIo}+M2i*IWX_@FLo;V5xg!BQB*zdAc8N&y`KByo| zIF)Kqpd*kE0m9)n()OjW1>rud7>gG`{=e;$GMt(a&_@8`b}RT#X!|MdKB^o`8Zhy8 zQA|c49|MGgHQlkF9PJ<6J~-(Iw1_Kyofs*5BbhLia_h|)M24PEZlGGB~XFv-JU-Cr3`qCFW6=m5QZ9&C^g!4IY zIIIsL8qF?$Z0++(vILqL@m>%U+803M6J4?P<0~21V*a8sEkReZc4>mb{Svsmunx5h z;RLmae;n$|3eFjZOh#psn!^4H*c>;cZXdy^xUVYTk{z~f9zj?N@ii2pWrETH7(YS3 zuI#l<(4Z&CZvcrAOp&kzOmr~DeN%CkghDbzN=gaoTOhTjP1tP@7fbZa-nSK*GxTnD z+9aka?C*fh&H`I3!+s&)zNFVIiS>50tj<%a4ua#@+Xowbt(QdV>4`kbFKU z>{Zc0)0zus41TCk%S6rCu8Ii?^+%wxITAC>XL#I?m1qh4GL1T`3GOGrg(XYugW`NN z@~29)y`oTm1}gU^Qb}a^a`IZQ`?*5blAVf63iTHeHGUdrVsCND{ZgqG zTc5PMtzyFa6?hyhh5;0w!yBvZC7=0gMOs=nV+6$&h5H+DdFddp^zy3rw@S2BGV@{H z#G3bafVEA`iLu_kocq1<);cl0p49ITK(ar<=-E!Y$5kCa!$rE=OlyHZDtp*q@v@Sf zsoGMGKcO5vcQjcpYi9Rnh58pzIqZw2b^Lq%e^q7|Q5bRP%qvn(&=T6; zKx1=6;YBY(_w(> zGMsxH%Wz&gyPGR_4a@LWRjKgdU?{K(MgZVK>@Jt$<`*wqE} z;Dpx`wU6GcZVcq?G| zASu4+=RXwZZmo<YZb^>V0j0e0U-p9Y4;w%=m zZHNSrPoI;V08a$)WNIc_z(v^(Uaix@9;uI$6r4Ng=FHF(`pKa4lt_-xQRhxktfju4 z$L6S>6F(KiXj~%0TC7wX?lh%!H7-R*Dt0ZmbLjT{c03V#f5}(2XIdJvn2WI4~JfIeff@x9Wl(513vzf5SOCd2}KDD zOL`0kRYuB}9&&BCjmo!}^G@GL7fGR>2Wk`~`_o&zA#i79b`>N?N`krzP#pP$tNk$g z1No7@$-&VPcUR?E#!`n9r=lp_9&kB053?m)Cf=k(OK_eM^dv75Zw9Mnwoquzv*mo{ zS!&o}wuqwC@B&b|WhGg+xI`~zhkea02gOY$?V|KwOi{QOg3C@iSa~w<7b(qRa5Kz% zWY_No##w%b27}96B(!|4(E=I|r_dq+J6IPhc-$~a)0WMh0aEKrP@=?QFJ!qc)dEYb)3=zB)h4}HVU!<|pl0I%lUOZnZVOER^k99N(mr^N@M+&Y>Z zxWva)_=5CSEyzy809@wPZPXB<6e-G5j9wIjt&4v$f<`}<(BEYp>oY6&Oi$kIfW5iP+C0>jPaqYa7loRZB|D5>IOF1cBEP zw2NPClqwbM;5X?Q=8$qM?j>UfKdvaX8U`~MymEcHOy36L*E~m*Xc=-H92(vR71}6h z+=fVzj?w9arW;d|rLWR#yJ`vNDsVW&l$m#Btx|R42ZhR<#n2Me1W-K0wHWly;iQr* z&DXI~IejTA4;I^qq{pCG*6mWFB>~deQ;?q2Z+BF`aJJo_XP;7lxOjSS?$@TEBt$df3&wD^fP?nQ;A*O$c$ORYax!&810f$+N0v` ztAw>|xI#**Q2>cIJ<*J~h)84ce#)`rJhIx3#e`G@iEULFioMf~+QKrW9;Ov%8TuIm zQPhN20*_}j*~UGM>Jn_ntea8TR%+n+W^<`=`R2_twNhi#(hA*st8S{fxy@TPmwJjz zmCYH3QRReI2JI{o99xm20k||cL{|=YnSs#>FW_dCKWgA-o)1V@m*UK!IGkxdk>gPUwXrjSaiSDmd%b-dz05LJ)T?Zc9x3HR{ z6nnl(xBO^xVnKnHYIe-p2T8#$Mp#OH@x^xi08>@5mU_~dj3S6OF#*;9}cmWIw)QYIvY+yI#; zG0f9QjZS9JBYUo?#I1%&lx3hqPH4-Zg?%o6l5e~`N1xOxJGV!9mWU(uET3ozY;ROG zi8$~8zuR|EsLY51T7p^uicN2n*WwMl>y>8NP3xG;@`8eVK!lXz&EWuBLvvaoe^4_A^UWb9Bk))U^1;IX@s zxxh<%y7hdi+pkc|QYLdoM2doa1hC0%7<_%Sce2<0pE50jG?N+R1b9=2dS2olsm%4M z=Ou!D6tHa9c--hy{f}0p#jd4}9MKZkV*q1UMGl+pQy@H!ZM!~Jk(MFUh`;}mrYG3P z0hJY1+KGxYs5HUzfX-}!K3AqA8}9j9fOTvF)|PU- z0Oer+!@DH#tXQp8aYg8bS^!IolyT-bCMeVwfy#Mgxw1pcB`oax%RwR3eX-Il+s9dV zf!c+o6fZ$3IH*9a=`&)@*_vB)FIBE(bG5~o#Ds+NGH^IyBeScw;lKCEdmZlO3LG#D zw9HAC5Eb|<0Oxh0z$c59g+-jPbgxvfrGGQQDk#ub0UF!G4PPP}s?pl?)ylLj6604y zz4?lX3GX%FvHc3;Nxpsb+JmBHjG-VVwAX>gnWM1urd!7D^-8jgnp*nRViFR{8zKtd z;PQuL-l!ByE;026SNvhJHvz)oZm0@n5omqk-mDx;6xcC?Kyp&2w*bcp;K3?=|2Lea ze5=ANQG9Ed5>pb++rZ%-+E%N{(}izWjAg%Ontm}#5=1h>cn26E(L`U4K&#=(QJB*2 zRFg)MD+=^IfQI!YAGGD6;@+!7%ZN`q zYRf_bdLKZ^wGi)d`?b*fm1e15rmcz;1^WSDlWT;~i62y?CDO`RBe0ghJ_OiFWaVS; 
zhI5hR(A0+&I$&@Hjr6xlXldeKgAAfV{|M;Z!qNDVBZuyzO16Zv9Y>I83ix9H^BBUL zc>d_C-#PfWGA&cTeV(Y2kUjwtPZ7WMNNX@3T;gQJClzLKJt;eoF)`tN3cQR#dx`tB z64z(Y>caLjz_KL`h8wR^xX&uj(wwz)Cj=#_-siwM#gA8{(~(Ip=zAaG(|ey+s3lHI zPpYR0OBud^GVr=O)Rl~5>X z3im7E2DdD`+r?&W)_qlxmJ~w977p)iLr-{L1COne_A>SyqK%WUE7+7~r^ag1P>K9! zx~%ZO0X~mr|D;ug&pCfnp_X8$B@jo7o^^!sEl@a7;#c&s)XcY)WU=%qEBQ!EXx{;i zR~I2B^*`gjt0>D(W=2HnrG)f7ka(2J0T#MzL5`t>CvU#5ya9th>2zi?5*6|fBC@2{ z@bS*zK7XiW%V5oLpOKzGe*|bUSrI~@A1iQ8$%uu>Cb@XX&DTk?{}<|c}TgRE8McxPo}SRJf-VO0e*o3M4M39)*f5A@H+QP#cnl> zsjOtU$O-LNpmE3g^|;@Y-ch4G#IKcSnKLbh04s8p64r0PVq2j5f0TBZE<5*Ig;{)d z#~nph6z1>1Y>mF;voY@XinWZRj?tHB3iuBIbDoQrlKw4W-M0Osk}X+))HP*7P_Taj zmisfxEg}4Ke^#O;5X;UhK}=|W0gcZ~(W!^Ia{vha)lLe`jPU!raMBZmieBv->qW8I}$w3@n}t(?kFW$ zCSzv()T$f}N+M6s3v1r02zmMq3SDEKzFk{tdko5P2HCKz7sEqQj@6=!84O;UXAVJ5 z%X8t$U4!E~whCDnoJT)ii?WWb!rIa*w?H{~4B$<)5O>~E!Io$t{dqN(63(r_;S4_S zP5AFOz(R&N?AD63n2**VELBt3w@GDV-fznf4A9lu+pZTo(iHXyV6!XY^XhOMRm#GY z8+2Ckb_%w{_l>mk?{PhWo(Sma@u41F1-Rg1y=8Ec(uWKqJu~j+Dd)etDAH1plTi>} zmG*^?&e7pBcZz~7!>h!<8Lfn4QUW>^Aojif(MgJyDzg<@yPc*eOFAdz!AVgP(&-=_ z<=d7%aFFXAcV`?FBK`EgR}#dTK%AA*eXUQOyR#HMy7nGyRhQzNjpA_BMiE`_EN!dO zX7qLL_6oP;7Fr`i)e+1&z;G89b`@i9)Saswi$hAjQ5P2y&>f=s4EhJP!)25^D#tQZ zN;p9ojmfx@fbIlPYxM3fa%@zT#hYhG@A3N_=YhuClyQg8-%h!+0xcnEcKDF^gvwn& zJUc#4TU;K6<+Reqr`+zU{0YNAwL4WMNSpVdOdO=r1vnb2b(?VS+iv&95(_~{IKAMojitC?nVu7fves^!!YuPU!>C7k!t4X{6q-NTuM)d{MO*fd+T-=C zhlr&NIg}xJ{)V%y_#6zKvUUSn0Lx6zv@!`%fo}(#-KflT&gHlrO0{^k%vsKAf*S;m zJ&4#-j&S{XBX^}T*JMv)k^Uk|_VG+4&aDtx4zOOZ{;Vg;`w zfC2!#@1?m~sdssaj>fwCDbTVV-636@kQIIre4Z1WvE|V+tx$_UPEFS)#!Cqpj+Xl9 z(iQAwZgHu(;${?NTFcSzMoYPbl2oV+5}Ol=X8a?DvkJ19lZ+9WsM8!sd^xxtHM9z@9wWK%WzNG$d9yyb{%Mm#^c9d7ZkOI#`97_S_COH zUU;0LdQgz`#$zdg)BxdtBpA$a^u;YH%d)P|a4tbiXm!vy=JaPS(7gBq{?9oDT7rX& z&Jr~tHbCTA!6duv-D&Ca zMLvP>5c;cCcLR7~N39TI9iJbdS0LR(wJ=)^kxOf&)G8-sc^IiqokN0jIyJO5SH7{S zQ7=@gONeF)B|dQca4o{Pp$JG$Q=3GN>VN<#)Qu?A$-Yp?j&sLsYAzN0Y60tZw_l55 z2?yGWk|8SvcmxU%x0su1(*=mQ|5L7I5VmncRf3w}ZUXMqtWF4x|41cU*350>l*VUa zDa4~t2;R-r!9u6o8+fSm(aPRx==jXB7vzNa7!Wz=OZEreyhU=uJyyBf4cxXmr>hG7 zalms%F8PDA_~7*86>YJp2S7tnuulM%?VGf7W4=g3>xoLV1TtBT?6rjTB(OLSlB^<4 zp<=V?+>@1Q$%7oAmO)hDPsxJM1^83f6COkb{xrZjpqEyr7IRNmq$MKAYGe9);XDJJ zaL_7Ys?g;P_e|wk0)hjyua^|?vj9$;JfR_K?%B%TzV^u@s)BzG@R=ryE)&i*=iPI) z1nXt8SXc`2JQU(|s$8n`qW!J{y~4iep0CBQ>>IarLW<=SX(`AHP>?iJf?W~!LglZ6 zDS@J3Uj*y{owwr!^oz9=>+QT5AccAf3YF%(r;CkB$-9%6YEjm~d8@1x;AJR4+RVUU z@cfr6fBV{JhNueu6~L$2Tn_ab?v+Yk2b&v21^z0)lTMs27SGhG3oGu`%H3+Hb(ZZD zIU&9V#54owpP+KDRp>exKrboa*8!Y1=jgBs-OW_)I?OpJ3ib_I^`&u02LFx9U5EO5 zNddnJU|s>zhhC#V(vRpjE7o#Gw9Uk|2@3QrfbwQec-lr*2a8LEVCCPcY|DxCHg=Y- z6ya?sLfQcH&v?7HD}5ar+)E1h9RPEKPbOSDy`SdZso1U>yiHJ`@5-ueX`xJ?_kFiw z*P*svQo!#4IL&H?sIKPTtL*It`_b0V$tsJg;NJ&)W;_vG*!#5v>lIHVUDpRt2zFhZ zG?l?#ou(zQ-wghs7NINGl_4vw@F5g{^GnI*qVS7a%zaqtmP3aJ=mORh^hbbB8|_s( z0QXVluES_2L4ke@&@>x9kE@LzSL!<0a3~7)6Tq@nOEz<3dD@?oB18U3g?DAuvUH^g zpUSE^Y`*)n!q=hZP!#NEfaN@T@E`@sMO1NO`?Jclga>V7t3^(Lp9Ao0vKM$Oa%qWD zfHJ4Zfgp^gCg%tG+~>6bW8K)71ISB(zJLN{t{EsL!YS1+YGKxE%^=cJkT0Pie7-w5 zSS0&Vo1Mj+|FV{3xSM9_q%B4H3W|~zvd)y*$$nMKunr+>5Eb~>08g_46j(d=btSLE zOn{vE_7#g9xvk;UD|&#^$ac{ESb3J+#FkD@ND1gC0HyggEMS5;`KeOZ!LLQS zQiPwO2yG6AJ~+L{{andgyD?-j2Lm|){sO?XxP{Wk?w5*Ohq#3V1^O#M(+sAJL>ri776zJ~&O*5A?pgC3Gey`|tFqbMT1^5FBkhTmg6-)DF z_eX`V$HEAb0{#=gd>BA7$#frP(fwJ8mg$?7PJxt={sL0k5)6LJ$7p|5=Jsx;P|`q( zs^I?ye42GEktuh7SM)kqM<@#RAHb&7da=0A`w;g}MQ>kwtwmMv{{lX(*1p02t>|^A zH53K=A7F7hjSV>;v~mAcoMn3=)g`c!VEzY8TH`hyJum5Ksd30rat(I-+8b9?1%ERV zPqmYaWqN&nrr{1%`a0OjAS&>~0B2Vu6PHYQg#s~mbH!SAakJ)bP!rza;H6opN~17W zaz`k09W0cW6!4J%rw(Df=y;TJ*JB9BzA}6?uQK-qFqA%8-?MAAf9AALEQqVv=KGCP@HSH 
zTPkrKMwFKn@T~ymMRNPHI$E*cS_{zCBDsUK6y!EUSoZf<8qJx@%hf%5C<%I7`pZ>! z0^oc|1?wO#PMoEIu)c1G?=-6(R?yyT|GE zg>6?)=4gQA+*yjjl^fqSI+1g?S8nH3{eh|*^n`W}Xx`6x z+HmjC&}iSpWbfpJJ6DOF7xV`r#x#X`2T=VHDNoIh@ZI!Wzq_MC*EahiNnzdzOdpDK z&*q26=yPThZle-AoB9J)vp)z5?>z9LmYv)&p6l)B*?4C~uB~NbnnJw`sL|*d9GMu) z^-a3FDsgS2C+G>SCrP7mGd`IgbDP$Q7W9O+8MLTHcT7%>P0(;29Jtz@uf(;rXiQV6 z7l0aBxgDd!Ik!b|YqN3?6V8R;L~|lHmFvq-_HG->xr>yxwmA_=3iEDYMss3E@5GLY z9L>UumAJM!A%eoYBw5Fi(fmYioNk2V+@(rfTOA`wVP2N3c*_gW{O2nz2C(v703+7iCufN_c93wR>V(QieW~^qj$wv7z3+Tw!W- z-1V=K6Vnn-4jfq1KJtsvH8sSlhv(755?i`LV*}=$PA~l*>9s8d;Q} zBl?5TM-(+NIXa#zY#$ok);r{`R7ht_mIaB6327%tuw>Gr6ZvhEG^oX)Lr`WPsk(5wIL81RUWPFr9z&)BDcVo)ve8nq^Awoj93IvR| z{@hS*GFPC6o9G*NS@A%b)UYe?t3hJ)SA}OKdLBVR|>cQMl ze_>beP(J5&DW*G%5YrOQZmkW4Svw0uqcnlgR~zR)H~;ymE^DJwLb)0gBn@OB4iIR( z&_B3qkd_A5D6exqDr+>Pi3<7NAY)_<_wLL^mgqi;>~3Tv)P!}duy_hg3=Q_>?sC4n z?;2SNHDMLNij6`T^7|>M%YNelO;SQBf`U;<{-`iO|2#BwwVPHLv9T;Bq_U94bCdb;kub4l*GP(s326=_Tsg>(jP>^IEKKCC%2To==jIjGc}vS6w4Rm4s0P0};H~ct2S$bV~}_O8=Oh9m%RpQch@f&_dr(l;W+d zQ&QJ`6QqRF00n)sEk8&;gF4SO71CYbsFE<2!9cWyUPH9CM+x0UTR}(k_d*{BG=fW< z9M9*(PwZ1v=e15&-?U2#bwz50z+!UusJmWK-PJ1Si2ehhkN)@Luy1r^V$wZO5#9BF zBqfwpP~hvR=V@q&y?&5_y7P4jHDNs%EO>^&;jz(i3hTy4huuSz)SYLDX$j{BaB$K< zoP2yhJXCp|vlm$dB1KcU4+9tWA@75{u~AxW5AMpjhbwX`{o}#ghoqd)ZUhaHe`1AG z*ty&7S6+9Kf2*Jn9|5B8zpxTS82f(;>$3kME#ce*4kEu~L3Z_yqjrx}V0V#Uny8Q; zB{d8td7zgj-J=!QT@7Pe!g&lhu(}kdjTL;Z`>{&t&g!a?Fdhd6;-lQuB!y2o@`~fz z-QyM0U3?T56Vele#Irs()t4KabWdC(DJ~|YCxHZOC9`mBbT>V-`ea3QXRQ)y!g>l= zF>B=)x=&S7cVi(~D`*MlY2ZXwVk|Abk{$Tlu%v=ieFc7 zf`C#DuUANSGbe=NQKsPyV8nBu!}+1f!7)lVyirlzML^;ZL{Mt=CJ_Ca%;$A|c<^Rr zb=P*Bs)3YH-U3S7I&+JAtCF@-_Z_@P2u6W(Ok0HZHqfxs*A^JLw=1yoiKMJpxlK`+ z?*J3A$?n0)9safJ5!%Xpr{cPcP2yrgdKXAB>*_ND?^aNEvnn(sw1o2>aQu!U1$0-^ z_Huxc4iyyC^wLRLCCy8Exu4 zGUam!>^`W#?%Fh_C7cg|1M5qR-^sxdTGaa_$cL5Go%Kzq3F{+Z>1w+_N5>W1M-|lF z%=T6CQbPHd)M&hSkot;NG~`nz+{YEvU5yfI!ukYQeg{7`fczM7pHyCV^Ey5xy`nHb zCG`t4B*d4WR$h1YOQ;F!GhiXB#P;k0_gSTMmwyR5qW`(1{ucN7b% zqJN>3|4Y!v>p(d=afJOgzNciT>xcjPNx{Jg`NC;m80UpEe zqVx@oPUOdP?(0hF&SS*Ig!B!NeEb^r>iYZLH>TEh7bIMFOQ-+fmZYndhIi~jeZk2sH$R3oF4?)yskf7-r0z^=QhztVz$i0sl* z;IfytuW6HRM1)KxNi%IG88VY*Kp?!Cc{B5xH}j_7dnd^hL_h>&7Xc9w0g*)k6%i2> z5fEij6!%^B9T5=`5q_V~Ip1^cy>I#EHs$xnw0Uplp3k}8<$TZnJu^MeDH+DciBVa} z2WIzA?VAgqaGYDTl9ifaeUe!89ohjXzQcH^e##Ng%y&3~rr~~?xI;SMW#KapxeH9) zLY?n2L;EbzxSQmNvfcDK$2zm!R3&CepC=O6xvIH(uztaT&TO4mXc^8I4JUe7n9dfy zbPI7Rv<&CV#NqmgTe=pM{)*$B+4>kk({TTtxRgp}n!up?s>7X`R4Pds%6||AKb99W zAHL=oXC}ujF~$)7lL(A(#&`8}m9IO{nMF7U=o#LB5s&go*Ua?9O!$T)otb=cN`~>@ z#NZmY0fn3o-*l8STjLd4hVwtfp^UKKMM3y2huVc--h3HRCueB?YiKpI@zU_^TZmRC zXK4RtXvR!bSH5!#(dy(3?Yl&)oG3AQ;4CZ@O=7u4CrUkg;QK&SWNlob>axC-L!Ftd ztrD}hI-5wPcS20va~$W)WP~pEs-=5tqTna`skozV;~;0Yqf~DJC3}(a7u=8J7UOaN#v8xgK&EXIm6L%Qik#aMBz%)fizp?4i0o?E3HP&uzqk9*7k76 z?;)03FF!O2Ye%@#_Yli1nL87U+KOt1QwNcVJ{RudP-nJ-s>Eyr?`m%qx7T6SH1Fn6 zXEs|kYKC?90j!wVc8~83mQpjUA0}4yZS`~D7eW^!E&f~sfx0mPs*)XfJ^oNVO>I?kD`j2bl?y$2DC@?F=> z-uaos;lU1cX7arxWhf7^Hxk=!V)yK!14mF|ae)J!*&EfU8P-FIB|c_}dBxSqt#zTp zouQ9;fS@5>M8vZ(h6mwNzSu#oz&AHvSb&DnZILb|y1XZ%t|;u-6E1OpGn435VurL0 zG&f~gvo5vQUg-3~rTFg0!(~L1vS@AF(ZzLwt)kyr*zER-?s|(Yowhqi%?vs`V^=py zr<@Tq=O2jIp7h^lML#Q6JDH%iy6YK8WUM(x&H4_Y%Y3r~ba!F1=(O64xZgDompfl+ z2QIn_bwX!-v9k@Af46ZnSf{7evx5}p&nH)#dC_0%-h^7)%>N)$$Rb=pZWwP^?XIl0 zdYu!UUW+~PmzMDN;Y#P|8vaR9Y~(w)Z9@ab{F03=v=CUoq#stcZQq8@3LUi4KsB#F zs~nA@DzkHaCv0W=ftej3X13Qbi*OZLqJS(mi`HVhx47AB7yS^fc4pGUgU9-z+wN^c z@BV;m*A@$0E~xC(Da!C1VG(jRBX5(n^Q@gsI%E+ZMqZeNDE*_l8=&0k%l%M!*y;SF zx94K3z0uuP5>Qsk&qh#i_H?uzTR&vlaR)2rN5}*%(S=@n{dkMj(s_8eqo)^fxmbFg 
zQ(X+_u{N$V*vhR$s6tacm&C{fqX<80$emMY`jhjP#ymX2k#~Wo@r$BFt~{+TU-?2n z_m<>~@JNE*2Bh1I$Mfx1gXHcZioV%$ccfK1SDG||C&c@S~{$vnjgv<_L zrDYQaS%kgh3r6-?>|G;!a-VaNo*Uf*d`uZI0usuaiqx{1&@cLn{az8SArcS81(Dk2 z4X{|X}Rsc&0G;RDq^y5~%qSanH(Z&I^M9~%wIzMT_Z2UwbIv%4aiV`sqA+m{?sUjRQ z4z9eswa^6va%8pwPLLuGGtR*-4BB{+WE>nj*e3|ZtPS#(~_C$Yw5)<-s z{HC0NM-!Hkjx%rrucUsABc_cEAAQ-n{mlZ~sT_Thn$JLYwta$n2gVut$!6#*Wz^-e zligx9;~y2_$H)SEM=f%(nt_P22n!F%s8SVkdG)-=1@g=9J#qK zxZz3JYKyd#BeMm?V7Ys$g&nojKbeQeJ5vYn*WF)~l1k+Z2nHax-kOou>xK4WL%axRsS^pKs% zj`qV+HnD*D&Uni@^hlhauEDj*09D$h1vWzVWxU`_q!*RREqz>BsFRYzau1de+YOh% ziv(s1Bb=;IZm@}Y&01>}+~I<@u;lpD_?z*QW%#}R%8G0veLh-6BWcTFayX*$Uby}G zOc6TdiRbDPMC)3+*9!PQR(M!;ZklcC_KkUE*4kuB4O1b zn_2YGztAc*k5YVq+WZB0DN~oS_QC@CWOhZR%(DyPnlxmr$}XA0j~pnuSf)2PJI%uQ z(CI`t#6>A`0;Z-IHnT7}d937FtJR_bdU3WK`ZwXO^$rZr;{>OIwbWm0(Jb^e*>fJ6 z4GFai5Q(!eKuu3_kS~|y8j-mddVN?UofcaC7HojM?zqkNfz;Wg`YE~!w};jDZy z2qO}Hud-2Y{wI<_s&je**+VgDhOt$^%D|TiR*=~^TwAV!j7vU#}d@jf@w{LVS zzI7sRVZB3NfKn5l;%w|svH=R)HbQ>|&#lQ}T&(pTFqZ5*-{~k^BUv{WpX5?^Il2#h zal2PR#DsH{Ke7lwiG2hGAN zIX2H?G!1H$<}U&5y>L{?Y^Zy54uKr_{fma7>m19AHS=|okcSE+*!A1{Hc)qij&LfJxeT@ikk`1Il5)ZNfKpGzKI>^!9P9Fty7LgD?R zQykFwUP5fn!r~^ZhN{4NsUtVj`ILad4iXb9qT;&wIim8I+YAsF3oU442wc3(5!1$n z{Pi|{bh`pp>C9gY!ut;$oja86>%ff4rq*ea!`e99QAcH-X;3ASD8kQ^AG*%sl4_yn zYk0Y{lUB7$_c~4vfFLq4?>uyoAl9A4?jX(4LJc-&#@q{Q7( z^=AJPAt{O$B_3!lfIPg)G1CXQE{o^^fd1S+4&~{%El9=~o#y<5fm4k;s)`nJ^>6Xo z3ToKhl?z}7zuLw^JxD$auW`Q8>%+!knR*lr3T3HIO=dxTMCB~}vLW|RuA>?IYIh@W z(S+AJa@w(3iF#W6Q?cvxWYiBCM0S`MMfeqh(~h8*mJz8|f4Qa077mf{I_II;+lT0q z7)ebyfS3kyhJhEjBD|g)a92{J>uT&PyuoqPVrlHVSBQz;I9U=`eFjG)tnnhek?^+$ zJOW8;xR%p_o2o38AfgVt5E7CTBw2)CBLvGN)cu=>H#x{<_;$Qj5W`it>s2Yo=SB3* zi7Po1-9+AqR@gPY{zAK_q7a@Q2*m6_fAM$`-b@xKis^d9%`*&>p9*hrKGMg(D@*od zWT!$)4xEWdxCp;aJl2O{AuM&p#xb4aHyku=n%aiamv#$dh@ik=IVnuJt$vd%P|~y) zFbD0$0?8LNIl^0=f%Mwa(FfbruJ8lI#1J^tTgr9|?MmqIw+K&5%RF0>W5N5(U9`N- z8A)4OHWDLUTtj3fU`OZg(m|wTpu2iGB)?6bXfoK5C0_6FcIP9#=HpX?UKRwb;kJSM zeEvd^uAF``n?ZLxe2)h@EA7Rtj5kie=!MMv9r9zVvb%nwe;m^s-r?-*0=@CO!&fB| zEIL7#d$}-;oWw4sB5gT`MN!Z{FT(GV1=R_bApXQn8s6y~q|c(#ZW*1_z)^`b1!JoG zT!eQK`8>QKas^s>u>~Kb=fzkm&hI%RX~%fU$UrL2c-r$90a^lYjvBLZvSnm__=8yt zI27Uc$uRfy8Z>+j2HBo|xAWO-Pdf#jFaKy;Ltv%>qn_hKn(9)7KOhUV_m(;fo3up3 zdmNcznemIo_Rz*sRF8~;_^I%%xTk4j##Or~A{5~d2`VNGY}4){!n8N^H5B?@2Tw=9 z%jp@jP6i0efjkwzlq8reLKfkD^aTCK`?$tkoj?5gDO9a}syRv`z;2E^PY32b`C*h_zWE30(@q#029g{@|Ek zl+x^v$pC!~5(fSghrG1WLMQ{l{FnHJkKi93X@~I`PIQqFSA-7|h+@k8^lNKN%i%)~ z*v!!~(nn89@($C}d-fIK!$hLi>JDN$`&!80kuwxQD)*7KuuH>7oQLZhy|j_lv9?dG zB#3y##dC+t`LdFg%vqtpfRFA^$vhWCJKuuWEWqWf#%~Md)6Qr!0bwMRQTtR96sGKJ zGqN)T9lMJeTfakUb;U#U(XmXS=K6cf)bIdxN-!g2!Bm@?n&C^lD|suzj5%i z+$qfl%PP@@RwX7wv8O^k+d?`L#sYQ=^%pbli}1JPg$E;#A2_%29QNN*Yi$J=mpVhc z;_pXiu9_gMxjTl)i6z73&0!$M^hzb7`S)Za1B~x$;1n-!n; z{PcmtGm|`YstG{*38n&hw2Hvp&1CQ>`O&6u=-E&fTe!VSN^ z+3RgU0mCrl;Zx2<+CNclc>kA9OO=|KMhA}^IDq(u(l5fN2}^Y=@P@C7@%9-Czn{(SkeDZ-nh4UfZ^=7+YrC%EHztM_3WEEF`+*z^s{NbZLtXd zL0~SdGWkCbUu%e34#a?|Ss|vD10#71(@~Kq>j}W$e-a-*IH+{a^6+&>Pa7{K`e;ka zDGL2VM!yLEMNH=2(d*kf(d{5d@EZ=A_IubCt`BrcNEX;0(8avs-jj^f%OvXmCNks2 zZapB0J0=5#Z#oNUZOK>|ZA6p=Wm?zpd0MlLits;#=PKLSz$zt5QwEQ^ zoz@NQ(28eExQ#Q_%q6QNz@iS$7_##`;i$(<3gWfxkm35wh$)BfVjr1h@o3QK)XC05 zwh)lYj8m%!-%rMATch4ieLk%|6h%s!a9ihj7yh8ZK*BAI%DGjlg2Uuw`1Tr#Y6k0F zfI>I89l4-S+YAEr9N-gAH1SB>-kF(d#Eel=cI?v-UE#`|5D_r?9e#jZ$#Di-**ubW za6X#J_95STBqW0#{f`+ca~NgWb7;Bln@6r&p zeB;ik<uge;v_9s}wB>4h4$uN>E8}ifgg8-iOM6H^)tDjV?7tEOwz}HN1Bxj(D|W zdDYsg9nSY~tX+8d({HTNhqI9rjbwViAIv{YU|m4V;hqkaeywsi8Q|=1t}oS>(6U5- zEC&PYh($ZHJBx5G0dIvjS-vcs=XjSjdbRvU6n%7Jy5X|7`nnfM@ZmA;No1oNa){ZZA!!a%=lw#%KK8~HvpET@# 
zlz~EqF>fJb>*Bsdq2VXiIrUi8+e79sO@l*Fy9Nq?2WZM0m)W)S{EOynmcsqW3#EVM z#4V7{1f<&KnAfYhvukLt))>BvjWMw<@F zec9Y0^oAg!L_86R?DiQQk(|nKrGqwmJ&YNjk6Wxx%wF#*f^i|!RYgIw)DBBH8zdv^ zY6ncqT6bB+2P5JzMcGYA;SrQ7O!J0JK5Iaf&~-)qCbtqD;@ zo0Q@f2@qkQbC8Z2`AQtt$%d)Qazs)9fspkeNazVcHs~NnYNv)~_d9r6_<87In`VVZ zNn~_nxoOJso&yJRTAMS>TSF28x=lU9sL%edh)*Wt>h|+ zYu6D;)NAn<`ze0F0n-+B1eAv(UX75@R$p{^f=v^Yi%Tbzt^|~*^85}59XGAO*y5^t zX@}@(QL1{x_c=m)wTkR#3sv@mL3ru-jvKGHH!=LBLKh(*k_sgshmZhD<-__M@ia;&=S<{U0QV zKORk7{G8QYa=+wb95{X4lp{Y{i!Vt^fi6lV7;s*)B)_b`oOPMAh+ZS&clH zxG}ord$);tFRlk}UqwYb7OtGKTzObc&8jnaupxuP%FH<8a^_E< z7@sbt(xuTQQxv$aqQrEA8=RN)zVhd`RZ)h?c06`hz&~yIM>Tmq}DtG+KEj%Nr2{~qv(do1#jK(P~!kUd#RH8tEk~r609V4WR+9PZoGCEv|IIH@*lP4&Lls!jlT^B+UN3k;G~D zXub$)(`gE#Z)5a+21iPpz?y%80y}JjsBny?XuvsvMFWh*>>ip5g=3~q{fcnkp|5B4 z^-fDFMQ2zYf8~yyS?n)1N&VhKUKnYzLYcOA_C_2?EpT+vsexc3gUOc#320z_;ViuF6ihLZ>=Xn9uMW*FUH+ zp+8nl0k@Vm_0&4>!@|>UA@IXATSI^!wi9W*$O!?c2dXgG=F3Hg1){s6cO|FV6}_iB zBk46(8G$Qx1VquR5gV`Q{S-Oi0$*z{#y*@hMW5l|=_s8|IiBw4?YP^kl;lL4ZZF0Y z@6hZ->HK>pv3cTEyP7~z)P{KBS}0 za@-8-;n|L#UiPIWG92nwQljcJ+N~d}0q^J77+70IZD5B+YKzAq{GRJPq^%(vgOQyc zYb0gB4@sB2Nd+krxSmI5&f#L{LJ54nV>hca;X`o+;9nwVrtyysy1cG;`kQQ0V09)h zAP^;e=^$Y1md%auLI>T2U!Q)`jer8%Cs>n0PDIM2{u_M}h)RDJ8Z=78B)rJc(n?i1 z#iNZ)BPba+`yzEzkYGPoLPtMKeEJyBr>uQEyx76hd#nT>USC2aGuK(cmt zg=6f(+m2tMZkTM-2WgWjm{?hnob)k2ijp#(#&m>fnljF-fyD44{36-m^0kDB@XCg) zlq<*{t>#kLce0ESZIxt;aus19eu>C*%CW*1gKY-ys~kT)xh9<~0xbzyby^Z;o^b_~ zR!H;{m0HoOjSb{Uu7SzT@)mK(0B`=94hM-=;{xV_dl_;ePY~BU*y0-XS z>&oz2=b>2)+-METts%@Y+#S(h1FEDqc>=*qjHbNWda9U|A|2pR` z?OAp;d9bZ~ot_NS3?QmqSuT zHP*(x?F?yYnKBeB%zllp!JSRr_0k_%dq4~qVK)pm48lj{Xw z;A@jY_(3aK+(;ls4{$ce(x||j$qp5v?U%Emdw7d8lU4*O%v>JDDjruy%uqM@UnhtN z*R?WJIsAr0HPd%IZvcU1^jD3iR9+z|Ud-8PPf{+zZxWe-@0fIIL0&!|-s%iAbM)eX zjAZnUN6-{c%Wh3@wOB8(N&)j*WQgJwh7*M`HZ~d(=izP6OWFdkg*sq&)riS3P`F3p zMfh!^(hVV7tt!sK+Z{Nq78$Tp&@u3jCLhJbnd9arj5*^V;eW^8oJvC_684UU47k=X zviEaIP!sLYKpnpROqJiP42hvP0{Nh3#uWZMywl+?!*}B)L}kI7@yX{!#b&;K%bJ;& zj`erd-nfg5=e+2kM(Fn(GVS50yzyYFos6MVl&Hr(euPU62-#f!BoaJw#y_o-Avzt9 z9U7qc9ebYR`j4Wsg4;acK7O~e)NB<*5^)a>6BL}Iii0~p3h~mFIK1Csn>pJ|`l*7c9E_zdio}#)!VWb|@%d_p zs!SP@XxJZ-S(XoQQEjeoVB2zaxT+C8;H;+g9bd2Dk(o60-uNRZi#zg&A^K$)*#RV+ zL1O$d8KbP!&28fy{>1rdrUQZ$UPyo5=?y%|1C4)Al<$yJy{Ii{o&KH~W4+wgONxBir@HpBsRQnV-Q($oGtvdgjEXtY7YQ69`&d8a2L!|D{y&>|cv}4h| zfQWQaEVmJmaP#r+9J!gA1uM2%hhHNm1d5zXfb{&5oC}Mi%DE)_`n+m7#F}DnlXD*x(C9- zWVa1;A0PvCe|jQl_?B~!7BHoD?$7Ew`$u`*Jt#S6D<&K-;eUxnpEQRB=v0EyYaGE5 z%Kv!R&bJ-^TKwgB+3VvC2NI5>a=gb77Q=8>K-qDeptP>#f**7sAZu1vBIzJ4Pk#vC zah4joIh3W?6`LMurBU!xqX7Ygjpnna_nIBY)fmBYBq9{| z#SiE`0Ft8X=j>+#BhAv9!mS)Ez0g&V(V~P1)L|c_rKej2h!U-qBXu^p;Q7xAc|T*$ zaSobkJ=NE(5)#VSv&)AI(_0ge-eEXeu=K-i9BO-VmFdgsL;9}zZ<#^eMwQ(B?-+W`6#PrinemR6u1aKL>$qIw4~G>ICpZi^q#R19#KSUv<4h+cP1p$ObYn0 zAgo%_JP~42%S-O!z-cAGft5;>lA}mUEYs2&b+qS>qsRqumO1_VBHWcsaofs3#o-Be zb6(O$Lpj^k$Cah(HzOwC?gZupp#B_}i+ETn+{3Zcih)iyo{j>mPDTXctbFG=PuN#K zOw@Y-HC8jl4p8u02`Zv~ZC?E0W}t#2k0vUz1W7BBz;Z}|itr$^a!-I*S{ppf zxDE%OuuCxRd3dn%lCER4F&&xNHb_-mQt)2ml9W9Tg&2Ys03bhm3Eg`iLf*tY;hFE_ z8!m8;n%U(e)C(sjN9jIGWkjbQ57t@%>=9D zV+Z+QNS5;mi!)sA=;_Vj%VFG!2x0;{2PX;sUG>jjLC|vm+GQ@sm5!LsIAbJ@s(sp^5&FCl$CtLQ~KL1jNRNGOyp7%mWPyj*TFx@@JYEAY-HF+p zK0LzNX?A#N1!vS!#}JiB+Vrf=#;BI~k0f6d4NO{;v8mJ|?sA^eYsc0oY2e>FP$LM6 z`*8o<^Z}F&P(r)WJwYybo|=C^oIK#fY@V>&S!pI7cq^r|GLEW1&PMNpxOl)+bgX09 zOp+^}q*7yvOa{FLH0*I+9wq}e&&uBxOw92YHXHJy%R(6MA`5eWA~~^UV!r zPLwHiMc7MDD6h~T>m(H5u+LdYA77;i9$vrpXcy2$_i18v0`)nrd4s9CWtQ zo4huSaf|aFz(IntSSB)!XjzI0dNGg-1``+H%!Cz!b?;~YOhXn-&eT=YVS+OK+*zfB zc@*L4a6{W6=szq342K;yeF7M)bsQ>5$O}QoA-PN|{WL*gmT(juteT9DQ#fkFoI~#C 
zgy5Qi(W0Z=x1%#!>SP7!0Fa<=L?a!&7svuVZoI|}>jO&z9lpZ#&Ov&*OAdx4M~$F( zqYgysML0@So^$LyfDS`&PWO9gaO!i-b67pq2a_XUgKm0WZ1$@QZwztSnR^8oSvG=O zu7F3Av#OtrQAg_`oQKCaU(M9uk;Y?`XO*F$}9W(9mE$2da zRkgf2UtT zV-ud>h)o>n`j7(Zzr_+)jiKU5Zxe<7KB#r9MD!P2aJXIg?f9K$>r8B0L{{TV4v?!t zl&`o*a0+|iYLcSEFFAZV-^bwFG1xmqNtP@!vMTj5ItW-z9pZCI_xdNX#d-+QL#`ui zA?fg8*?C9{8&7;0EQ=UPcD@C`-JLC*y4m#|UW$%RTbW)f?QH-MPF&Bdv2hVr$S9>k ztlh+56+nfPI@V#YI;&|1X(d)%uLd2YD5{+m9%1+h7bP_JVV4X^4oLCSTzMi#hfqPNgGl!bMNZzdH+cmj=w4d^c>$J+(@1{6Z8U{M9m^jt9X@t9)dHI zUXkt*7z*VMP?P~b3KZOHY$zx{Cl3@M;yL5?u+|u%a1PQwiE&=DDlrMB(=zXRs zYld>Yg;rS!P0ANM)FU!Qcp`!E6H^*+TA>{HNe-Ph;~aW~7ZNE`ASI?gCwPh*j=91o z*y>JxCy@$tKOhIxJx=B;-SA|GPAk8o3uQ?{7Bq`5a9!Kt@txVx*=wfI>VPPB0}9ke z+;}H)8ChwP*pY{)IBI%V*}QIS2^pQ(Q54npL-~$cWRyL?^21b;(g5XI z+LBv8=qE;N?eH{b>rwd2@k`yaRgZKH6opuZ8>nF(`%_2|2 z;*jwI?>s!+xk{hk{wU6+G8nB~`R|;pD9Mu%BYSW*$Ns77NK;#Z@%&R{2S3c}v(}8^ z8O}#qO|{X%v$c#qBP3?Zk=TZ@o98?e!!wCUxpTa;#hcLb@GOT*pMfr{w~e-ujG$1z zE>cGy1OoE#!*xICM(#Fqc(&uFRW_A(`m_%603}g~r-gd8n)5jXW=43vx!%via~-ak z0~k*!nmjJr)a>T^q1jPhz2^~%g9^)QYZdxgc)p{h6NL2T>@fwjPvR<~j2Tq>4z#y; z8{!Ly%B@!3t;}4ER(lhn$7sKW7dj)&4*A+8EeXO>JTVb(h?cB`3BV3r-SRW!kyc5s zjaXw5UgYdthA&UQ^NS*SuJn0}IsYul(%7b>sEqKu*r_)=#>$?}H98XSGG!JUsKBO0 z07)|R3d{!|LyJjC{w%^v$cq?#axEEgP?4;|YeIpA2ypFB|&2ua1iR{I( z4%*bLYxoFBr%#MkP}J9$a}b}VSRyJhVZB1WzI-5iIV40vA{p7ghanMi0dSB z1|1lWFo-M%REH&1KTjsk#ncS+Zqj85A(4ldJ3BLt7Qq0eC(yK-$|*!dSXwM8LBgQu^vWzvRZ}tb z^#|ywi?*g~mkQ^r$;7?EM71vu3@Nc5N=MaeoTbMkC&I>rrWn@z>H6~w0g3jH7$psq z>RkOY+2ma5O15DQuXVm|$z0Wm%3RILj`6wr72c?B;f7Mabh8W0p0yAYt=wZ0?Mj|Rg84^oA6yethChDJD@PgC;tm~MT^CpK) zdnCv03B5C>N>Y&aM5OE>Ou?8J`ew4h<0~%a=n{F0bI?poSKJ~sLT-IpZiMw^JRtRQ z&^v#f49H^NLdG2Kn1h9i@OAhN=inAC_8L)L>|6PP{)#X5-y}YM&O)O@GakcR9Xzcw znFPSIah;V!gy`g2rQgbL5lUWFeD#j@Hb+a_f`*1of_yv$)PEZ(i4?|STQou-o;Z6@ zJ&MW^`>XJF2Tbd`2FNqr_EC+JOfSzGvpZp35UD(Oeuq3zC9%>z6A!JWndDohTn7OgZTaFbPAf1%a$k1F~#2 zh6$u3U(It|?D}qJDQ#-nXpBr(8zL*G>``=Ten>9OpY|HA^a+97Lb}=?kU5H5%O45v zabD7yy2gt=Xlql`P{crr^2Yw6)RF&?s9X+G4720v@LmT^t6dcbwDrVbOUE5omwgd; zpdn*?Q)tDp6$4XYJtgdYM5oj*dm?+H!~30)W*!nJVXq&XE?ayzm%%NQ8vcmbVjVE` z$?iI3LyQkNayrGr_U^WEhkKQ(oZ;S#2QcVyU>wi^E@1%EegDKcXeJD}F>2oLDm7Vr zb9DPNsbX@^#N5Oze2ef3m2&BWWQCiQ{p!0Xpk~;xg%90KUh-95v#FCXL`^Uz45JK+ zf0+1O2<$|v%|cx~f5aJRwk0_jdHd8QnpYPY86qoj5!&!&d(x_p;!nvCcOxblxk<1Y z|IFD)&&g?51*wVq5!tj@1s^36hkm1U*Thpm=-yuhBp)x2N_xGcsrzCimvmY(ev|s!GOAF7za}cn5xm{QX29)CzO??vS!mV;YIF;U8coq?CgrPg zd;Kl>5c^+M9T#$c=M1FBz-CzW8-08Ix9!tG@%HzGlqf&4pUX76SdbC^!GW8-c|GNp zagiaK<^uXhBGY1ls~eY0wZs+-|Kxz_l{I{>RLRI;#$CHtvyJ9pW|oK9KNA^0I8K&W zY9|qte{tZnexs&RRkw2*_GbS|`dq{vWHV|x^}jjw^cMA{ijLI-x2-!dIgR!*QBsT| z^j1K@b`d^KR7U4;$&fm*Dp@3-aQw7wWDCe?=o<2mCCNdkaut7)j8IZRx@v!IH%~t0 zY}`B<9nG7#pa;o{8oM{3r$w|+N?txqUKm}Ga3NwdEy*-(!E=sIcAs%}(&qWNh~WS| zk)8W-bBAZM3QQ3`Ya_C@ineCG>h%?$a~9GI!zN~J75~z47$PPzWuMS5N8t0srcBXp zG0^X$x%>r3P9Ic;Tu&k!pe6{{NN*b;n3?=VV$)3K=}4uLtq)ehmmE307*BgdI>~r- zX9PggRIR|({6{VAjF$VC$SRuhqg|9e=Q~1;I7RR`SF^2B^rJy#! 
zD-73>TLcF%tgcv+v=b%s-BZ~GbWbZMu^!{0Mfi8}OnHJtJe>F(!mm1mY4zK@p518p zItBH58a+|@*pUyk(VT#I^kh-JfNNs|e*dpIU|QKN1z6FC3k(T1$2d#qpy zoSJ)pgjtM}if&Cz{DNM}$ir5C^>m409y%-EQKCRF- zQljAcH3O1Em`RWq{S}A}xxzGlj8TUIKHSNX)25CgSMvCchDd7+ZtPBJch!r(>dwT( z4>E}0ST_6WE)LpkoV`hmzHpV65U<~mb5~-X4@}5Qq+f_svCKuN5ULJ_yEzZd9LZ>q z6l))pi4^g9#ylV{BRVuHPo2z+0hXDxS#v;HTdn%LJK5**9F*MQ9?o+5>`Mv$L85|$ zXwC}Rn2cuxnlg|Ht`%zQxc9D26X~9gpALGs$Qrt^AySgPe;6XEzXsQf5HG^H1}PFs zv+%-s4w*jd8mO8TvZ7XsJc`MFQRTcBS)lI+j7iHjOi!o1=Ii!+=Vlszbo%pyZbkZ7 zH#U2{s;_kfZGqi$XnHUE(9H^LDK+j*zNiyH2Q8qtVa-62Ia~K}wwk%UH-vyL?07xH!%y~&$OD0d0$|XW3Zmdys#pJ;Ot9@M2vw>XP z1|yDZ*otZ-at^S?^YB1N zZ}!qqkClQF(D%ld19U|%-P2#J^)5VQ*^dvhA>bD6_QQjniL^rMyK`h?^lo1y!W0cX zu>`dxI$9$TPBQSME6W9rmiEn+15P~*I!wp1kw`|S_1H0K^m6PNdLh)_X*pczC=K5U zQh7MNxVtPzZg-hUy*{BA5sUVvD5Gc*)?u$;xmxLBhfN=f3!5uZ9~+_}?IyI;n`V3r zrb*&v7GcodfGkiOSldMO#rN|i4xfH&+e7lSE}|MCAw5mf<@Vi1NKRvDvljMJhf0g| zavF#CjZkuROFor5rwEr3kxnPBQ`X*fco!-B7TX;=Ew(BP7E(_ow!ZBEMb!mZ%C}b` zim=1L*V%_@^%{9pQ>B`vLVw!-|0051qFr0jZlo*9>X7~xr@ z6fOUjE)!|iK|L|-bY9ZBi>>KMNBI~-RPIuZqFRh~T;|7n%c#m&&=%e#s96%qq>74u z2IQ2Qjh4QO7bHB~xl3;Y)Qm87ifUK4n|#{%xa|0lDNZM^nalU1`|oY^6_P8=GoB z$C8XFQxZ49ZoY;v_@M=b(}VYnqNabp1Ep&VQszgMpj<-~Zy$=^Z=zGVV4;TvlMP@a zNmr{llv!+F5T?kWsDB;i3xsQ(wFiUi@rrpaf38ABgutwPfmWsT^j$})=YT+`hokX; z(`>3zax)?oa;a8g3{r9$&=mm<#8pc479bo~ygK4LI2_lyn%tO&gN~jaY@5|cLtkwK zL3!H)B>VyY`kq{{d(1di?eH88_Aj=qZZ>Uk-wZ)`A5};CLqxHpeVk#Lrpl>t(TA zm~(!bogjP|yeS`5N<#YJ!D`*z5h8M?tY}lNqej9$JIAqp44nxazL^DLloa1S4bhdA z8~yX_IwxRgR6vHrB@XbSZ@zi5u8!$l$vJT?WAG|wl6P{lHFo10N&)K7@klSiqX~aM zKzN!51eAo&aPpg5RQ$f-G0swYnK-4?J@;ckMDLk$1|UqNm?UGA1ziisEY)8>#lh@S z=TvyCvzFFC{kdx8w126@G+(Qbm6rb0nGeAt< zbVfpnl@!)vguYj8!bG)%YRt0*`98R?<_XS5`gkp;3Wo-cORAU3i~lAD_AAsa%+@9+ zG=I<@5L)SiW2QA^A8zIo4as<+=CAV$!Pw;X!W_@0F~E{g<4HX4;kpp!AhT zlN1R%D`>p25@4kP*daGGPiQOU03#ipwgIBzt&ZQxiO7+HGmxuDMym+N z$&XDN9Qfh^lrT=u8A@A~1B0TB$T(N1Nm<$*2o8b&RSgQG9JH) z0LoTacOI@vE*%pgcv$hPR8%jJw2-X5fSH1?TXJ-uh&v;v^p0pZW%TZ_HuDiYfv6@9fKgLBbLSsG!Zm?BgmD=%%1Ph^7_ z9J(Knr(1!iZXOE9PLF>n$A&D)ZZ4stsU^5OF{K#=onHvKY41O%DU`Xl;e_+hEJwgd z=|Zqirh=4&b%$z>K>00tACM>R2;NlE4goc()&RjdN$&^^&HLlZh<}XK#OJ#k5a<;P zj8c$p5nslCJ>2AoXFmSxvcORE$4ettyBk`WT5TuyL?Y6Upq=0RPJt&mdRq6?_vLEE zyT!NNTt?lMQIZ5_?I0K56Y|pxjs5%__B#;pq`sMHx$4)y-%cVc+2#1T9OKI3hdn%!LBB`yws44tiXwa{3 z{xnBVXB4Dt;*pf+GhkX=4hpdS5gMbG@J0AZvPazl7BX+IJSM}0*6GK3?FIB^eY*3P zJ~3@(YJufQQ)e19k!TK@GEm@5F3U7HbUz>`)bV^Ln{{Fi(AE{<8O~06xQrd6H6SB~ zDQnVSv{0-@AiH8GvRF=`WrT%)p)Ng>eB$RUmP6YC6Q1Rar8lrKRx7GDDq;xC#xn~7 z6mCAKUb<4yJAuI5J?eyLoyg&g#?2_<*^d1%{C2$FW!NFAy90DYeCu~;o^m3;Bop$? 
z*Le=1({poKc&@YYy`7uOv{&cg+~C$6#J*3Uz$~-rEW-0n&*+Zu{Qpmk?huF6q)$pxtg{Q^g^S&rI!hSg7o48)?cs(0pBUdh`XYGT?LEwR5q^fO(gD82 zjLiN9gi|kVKD_MD5uHcM##Re` zK5xLS-K~N>&cn-`hqPbOhMJG+3lEYqqxR>C^#EX*xI}ZNE*o}57cq zC#EOCVj*D>evv%Tho#-HwESJG?RLWX8Hsx7Z^R)9sGxO*U}^||=IzMUmrE0L{y>U7eW9iD7=N^Zc z2w1yc{8z{uw`n`yT3r&=qU zu&Cn|3vY1rv{ckYTEC=ofQ(oahd0-GeLG@Ea(jd3^R}~Vj=#?tIC zwQYaZ*=d$6XXJD+?31_Ws}UC|>XU@OmB0`0-$088^2ekbS#|2~Yhk8}_NnkD=P7OF zja$PtYNA8W3SoMx^mj~wCKqoe7qq)9uEGnZkoNNy=b)L{6Cc+>snZhWP18l%c7KbY zpuGdpZ>#7lp}m%e-*DXY?k!KTvGo_=LTHM(n1soX5V>co*e{Flo8*C3P-QCnq0?eA zMt?oLwIM^cSB%Qg@+fJ_hM_r@qlT!HDNqr9%LW58w1|*yC%nzEuLOVN_o<2wdrXn- zlg$2Pq-YuhF3ptAT)$07mW{ddq1x*5?GB%oT;*I0d0s}*6~GCrKt_-Ivg_sVkRO_S zd{?}|x)#N_JO3TdQF?XQWRML0ts^p;rob_Jk1>jHHcIFYL4JN%)&^_7V%bQgV)mWR zP&1EdEt>!a<|v}V(INSrISWdT-bIe2U&Sh%w+Jo#o^y~s)W-&<3MC}la1Q^@rWO$| z{(a({jW_RZEQWVG#wBrXMt3K-VaGE%S+V7$A3C<;8f!-6ycRFW!47@& z%xyv62Le+dL8f<*O%gA}2=8$onyHq!|FW>)NuAU@JxO*)n$c$+Gc^wy*YWBeZX$CQ;&v%@vw2wD3o( z;qN0CRE13Bv|_O$wd2)JZzH_lnQG>?H9`ZjYk;gA>ahlrhcBUu@JHlEbWRwV_Iiz9hw6fZSOc!lV~JitjGZ`(S-GOFZ_wK((Fj9 zCqmU|i3FEL4Z`K-{2+m8epVdJ;X@9a9)WUY3`x-nIU&!(%>CCNCMa_~V_`VNCi7aj z7eC_A={K&_62|V?L$u_C`Rr9DnD`HCwoSgzYw$x`<1kQ2s_y6}V}_p(!!pN)ht+K2TvaU$r(r+Z?=&KGxC&->|7LKP_Q9~dnwbe zi}25cq|F-bXlVq=9HsCt4!jG$9)FM+a3m0O*ayi;2!eN0^pWb@MLT}B9VZSqQZ%Nq zbN-d=P_jXH!PR(6-4)nO^}eN~jvi8ZyiN5I>N}nVu#T(=6Vnv2eIb8aj@i6_!@(iIRqkS0f z!-?Qlwf46A)L#8G(J4$2_E2HMQ=dk+dn7n8}zam*>A{J?$^3SyyToL};n4mh|UEl14Z#om{ zC|9Y{4I7|FOyrI!`=nlmf6!Ee&|G^W|2JCNus?ju@zWdI&j1m0cuBb!M#sHIQ8xB~ z@SA{1zKR{*UEG;vQ*u*)?2B6R_y3YX8bnA>(?h|vxo8xS_Gc|)VStzb?CC7>&c5>6i0=}*<`eMwrVkzl!daNQCLK16 z2RrR_hU`=6CRDP*&0+K8W!Df2y)=L&>ZRYBSjp|Xl{3;THPmUua}r@67<16r#lU9< zpNI)A+kN)CY74`&$qasQCW&Xa)u$O3&T%##mRu4Q8+nIvP@^N*+JR{jG8B5EEX;jS z^08}gO>9{a{Y5tAZ{_{=2D{DP#?c$D2z_1|@X|3QCTiNO{u`|_2uZ2PP1S?wK&TeC zn1$OmMD%?+lnGlBlW-_I-0GDBN7aX?QJ4dO#GBuaY*1Q8)ie*ccLvg8xs;ansJ^v* z5~@)v1<(%=2&*6ELnju4X{L8*2xhBYA6B3$A(=d?pcx#qp7!#C#H2G;y2Wn4I^5CW zcHrCb@};tvs@-C@Uo8sB%-)%7$98m!#kCdv9_%AoN{2htWpI1X6s;DH?GF*2s|uIH z@hcZ|cXBq;Yuetra>-nkjtu7v2$$yRoe4?#ve-uc1#eV}3WH0fMM5kka1LH zcTEx!mo^P)BzGlK^i@Mdgu6L@Q`ZKK(81CsP!j1*X@Y-O&%e794L_c)yUVaJuR|Ua1#pnwHy7!gXC-uEVJw+3r)y7d>seVh+kBjfkI4Tn6XQbztM`t%W!5(^5()U-4U#UR|5 zNc1VUH@d#bGRJEY7{mb?`ifQ|BGyKfM2+c9yEEcP!F&8^_T2rj$Ee$k+D?-FM;Z3Ed9)y}?A6h(Qa zC>lERwvmzhf|2Sq0@Z(;BkEG;>9LXRbnKP~1yuLbT~{*#N|>!SB6e5RWj1PEM1IB9 z%XTE|IZjrjd~A0PcQxXWb^j{OMv;@+EO;)8{$jr;XE!cf>a*q=Aqi<)#Rdhmr~PTP z(jbF8X4hgxl#Sly&Ra8gB1ezikRqL^NY;spG-m~rkIxl^r+`Im#Ip1cxzcgd9@W%# zi?n1~X5)uUlGVwFGVzhQhPk5)MdDOefXi`Z!^0s9$hxb2}7(Nc7+|@V#%9^ zj>E2ojM*ykvbiw?CFD>UiU~4+pV;CEDai2Co(j7iHobT?xsZ?O{K)54q*7^c6q_4C zVlc3pS;8rFlSwk5!G1Qw)3zsj9DWyA8^3-N`?eHPx%lBqpI1eG0e>&?sT;*4KrA&( z0jsa~HDscEa4-p0Gc1&x`1+?RCE$|SQ7Wj{kPm9Ubp?u&w=Ei_)@44 z(b*YCT5xB9J1LJ~3!%A1DOab+8iPgdwvx^7!H8>}v1ZcU$S70xNgYW>RMw1(%1&22WXg0#vb~w!(~akv3+Va1m+B`7^UM zGiBEy*yS7O!$CIaE};BB?9dnEoADb~<%K6B<56+^&_6J>XD>QukSGte9PM;AR+;Q+ zj;b){7|qneftH}d6vZ_$Yd~en^${|GA4>(&`#Hk(j-Pf1Rcw~%m5OOVN=Fn4zHq$u z!$uZ8Gl+X0U;aj(5IeBGESJ(vR`R8O=c(jCn-GYk;{G2?H`*}&G$|9H{P&puvBOjF08Z(4#Id}!{K!beH6^>o(8(3C#5GYXU$Pb(XM`o^rLnJ~zV9O;-}v!!x$!$hrWG1brnt%At3%HvgoV))7b&dByVn~Bj%WhD=*&RW`cS2o0~6OfTdd7jeD zc7rCuv;?C>tPBFSDAg0+`ZqZ6OrtRw<-#3LesDMjb))0tkXlLISJXkGVlslVoLmRzq!aLg$~~KFB+G`8#KNezv$O8(G*ci)vongc93Mv& zlh&%MOnoxMET`3eUL51(h7IQ;ZCcnG)~j@N?UIVbXe>$eM$)IgAkJ{{s{6tS4wNny zG@xPUdX1imj&`w^&U>f}iyigqAr+QvFjl_Q!TN?piQT2@S}2^0W&w?jrC4V%L`nem zL;!3{(+4*RP78`5%Eex%y}r2-PB?5@XnA~6BN95y6+*K9{Z89SQg5L|a>BYSpY%76 zAz5XcGW?XYunWwMm*F=4F%GLNSWwo<$tu#f2k|0o5g0$NnBa`VwA|#N>Gw8Jc`6D< 
zZ8rpg3qlgBnc}~ea1E?DT%Jfoo)1i+W}R;AJozMtOs})@tQyG0s?!sBw^viHpbptD z%jAR~Cm;CH`QAkJ(;7<5!;_tjbPU{QyuE?qm?g8^WzLP5%sO>cEl=&J(V0#*MaW==?jd@^FJ_K7MXCg6`tmt zG}OW(9c5(@QTX#@6B&%P-hNltDVAn8)4Z_r;E#u9h?5 zqL2GH*QrI>Mu2jpUqWR3oCSLTX8`0!*g$OKr4GCczZ-wFm!2lB-GU$W#cSk5vzm+m z+2NUKvy8AAevS+KvZDyUKz{He(y`5y4zQbtS2!nW z7mUrmKBwbP3F(wH$RyvenHR@G>)g=-gV3g1LNQ`<{qRZ$PH%2s^23@&Bqhc_v@DX9 zq%3F{h?@QqQ7NANrswb~$80v^{-nOIf7Iq-yxa~7w3e#yAVP)2gxw!qgjW-v``ub< zvD+StZjrh08fPT!X{tHCU=CETRvRWOS3Sa#lO`PqsKKcSCytC2a*S9Dl8InpOyHeNgpSE!KJu7(q-xkfWOofE3|Z$O2<&mJt$O z=eSL>YV5H%PccD7LpJe)?My4Te?1}X^ky1m9^T-X>38)di3RAYy+TvA3o%JZig}lB zB$Vj<{Z3e<;uLWCa2|fu@zPR%yv|>vClHhIt4tu!*@D=dTwJFsheWg~>9q=Pat6`? zOPkzCLtk|SK^gF9Q??G!FDBbkJ9@sEoKOghKM+Ut1{P;{i?fgx1I9wVh^|UXz^9dL zOrc`WSGvE@+=Afu1Gp=jq8o#Q)&g>7X^^TqiLMvU(WCJ9yzsZl4n@Mo>K44?@LRs!SxB$ea=VOn=NduT zFEf$4xN$M?ze8BvtNr8Mvi9^H4&H2?QdW0k13mq>zcZ=x2DEOa)bw`=OIz4iW2{|! zr{nIzZ%@A(A$D<_DP}ZpRT6#WkUM)7$|UX8YJGpl$8U(e8rmaFE!TrnR;Gc&uu9w{y~L@$+#Vg*qW~ z`~3lN=xziBGyqO*gA#GG&n*(;LvGx*`P-p z5+ya!MCWlU;7Ui13G}ES?(O-;@RCa`E)Rd=km>ziCK(~$suJlcNeN}Xti;@~);?&j zxzv@WxE`DSkYlFx4|`2M?>l{fn9SfF0VoZz4-@%Zyr`d2xbA{y6nKw*$d5P=Q~0~_ zE6sQ?N;=K3|H8)UJhIoHa>24hjk=HNB|_ ze?fk@9HAqbOY~#T2;D;Cr^N*lY~vzIzmkk>E>n@*3;LJ#{>(wrD-Aezc$4r~4x3() zF?(q+b*V;75&*^Z!}s0Glsw7P+L_Ivt%3-{xzNQWBj{2=j;M-kY%TT{5C>rnx(lMe zai-FGfGtraqVq9|peREk{>mfqZ^;3z1Ln=BCG>X=+hhv-ImPvlLiC6cEyCa1t6HqW zoyI5+2>;-i>BHAvb!7CTN>LQ>BhgEpP0XM--#?NAE;r=z+H(6R$KBCjx#2;5%aVc! z*;zP&e3kt(VK|~&tm{UkErfq@l=SK<6*@k~#t^7WQA8&u8~D*AomKt_T_Xs~&E4y) zv=_ISn5bUTpLG49-=2Q=5Pmf|6JdFFv4I0Qm*KX_&C|%p^BX%mmuX* zbsRq7JTyC0$$dJD8d)X)c51bsIiAHIX7vai*Zxa^y3GahEltq zw$iCD6O7isU#!8B+pjocGwagit z)0+ED$9*hODd9RId*A=Y!ma2Uhh7*8UWET3gZBc1t}VIJQ1(vwnzPs}>%(X%m-dN= zIK#6z&h0C4Ny_Of%9wqP)(vEf8wH*JQERlohvae1rvAD!l#W4`-g=w{n;TlTq+E?8 zEDF&chlKQwia~T`Lml`pa>i^qlfbPtBzU3L-U2P*8_rxa?}QOjzUgQUzr{U2h{962DZoX<2$n4AZ7j8F@%W1({I9)hd7>h(0ZpR9x1E7$us2>EEl=*TonZ%Q%i!+y za~_H@0E_T{3Lb98wC53-{;C(4+G9V!LO zw{lLK376s8I8ADbL4=aB_;y$$C)rKNG-bS_TEToa`NOEjt7S~zJjWSIpMz@N=(RLD zZ2Ip=N8&PiuZ$WLxaFoFw%9ACJd1Bl7Vslp2JJM%KM1#R9-3{cVT-dQCcC!$DCREO zKoFTOO(-wYoGIMaG1I0@xr#@-G>xE8hY}Q9Ki!UibnI=gz3uiE(+udAV#-0dy<<01 zf0c%g`UrZ0gBCF-nd?b_*b@fbAjrl2`P$+ikt+gtxRk|3WQoZQOW_XARI@zH5sjUg z>okVE?2JkMedh2NLpO@}MfgE7dp?+5N0b2sQ>7&8 z+GK@v5$;S>YH|4*#Y^mPcbA)sh=(yUIu=z*LV9)@carY4^Y*Sp6rUiRn6VLXdmU`v z%|X*X!SOE93N6S@+)j~ctRs8K?)#-!%{ z(yMl6Gw^#LM7wt#@_bS9w4wI=u(7b1pJ*+w!8Oku_&nUxSx5^@TYPw2hq_KDUgF(m zQbTNzBhhOJffr3m*XG7Y9>+{@MLZH4L5KKAjq@* zhmOu2LjMV3Q*~h94A1Bd4|MP*ZaGj}c1hgZ$f2{qP7EW#DaOW_Lbfe|in4$_W#Rhd>6BdI8K z$I=ybmkn5B3B@E}QLTRsY+77_iL<>&c>xDp5gtk|%YeRG`r$(7E3KHDbQ)Hpa&skc ztGNT=BBD`wiXqKBTVNiNmLLH+#{s zm4v3e5~eI9bjRs7vQXOP8|*yPT|dFzK$wPbsk4zbealgR+pdiv?!-hLwaN#`iWHmz zigdG9^PGQzJ#iWN!H;d-|idJkr z^aSbXr70>yGyQ9Hj37tMFQX8Tku~Q@oNT&IR+zMBa1NzVz z2*K>&m5Y*U7LZAi7mq}13G!)Ke{oBPoz7WW zIrT#;K3EZ)Dre*zcH!_N#G;(0CJzr1#Ope5>;UYcsiXeaZkv#(w&g|YtID6^gu&^Gz+p$Rkw@gV%oc}QV_*Hgr zlXtt87Vjg8Zl@U9Xu!2c-(MbfIegmr?X!V3X<3%#j8rJfazzu3YA)!2TXl6(Gwvz;8QHOXE#rLVMA2V0>ICk2T>CdBmQuLxK z1OBm9BP=?^L^g};8S3(8!R#eJ+z0T7y2-T9c}NQfdw)D@8dFK7o?m_qan1$~^vAH@ z>8BS~Dg8_YAIpn1!vdvoTIm$gY)vh7mfH|S=-8cyYaO}an&S2vDuJj_l(|Il7!ka$ zb#{Hbw~6ptxQ=|#8encX`fVdb={A@Ay+V&5BmnyrK*r&K0-$XJ;i-#Lm0du@ak)S|GaPo{vPDTRk@ek9}$YD1MQ9>dT7$S#m=0vfL{jN@30!X%@-ynx6_0F-G0vg3BRb^&zST zH;f)T*)n=U9x-?NKCH6-3Je`$W%s}vXi{(35}PS zP9K$%9Fxzcl)Q(<(`0^(4ALN2w2^qMGnU@?cA83xrSfGo)PZ`)`uabP82CW|po5M| z)($f~-jUKqrwxI(<*Jbpt)7%|6AJTxsZBr^lzyAXh)k`YsacqM@o2h#COp9zNK08` zKq=_Ej-)8Ycf3b*R^Y62GmsHBCOh2ls*knQ?-~}IjkM)IENW|X#QdV`L&;j@4!S82 
zmNgWX9fO2Hnx7|NZ!9@>GynC-+?bM{sPB86JnGkoO)1aiDE*odxL@qj)LC}uW>VfM z=mhvjBPa*?VI^LK6$0N6z-XEmjqSW;6%O@2l&j82vyfATqOJSWK*Y`}{#WdSjO8%Y z9kffPsSlJ%Avbl`k==TOvzYdg*-EwNl|TE`DukNsMg3=~@Es@gy#OtzCqxE=PTVgQ z(&{!+ zk!rlL#)kcaB?)CDNipANC+5AqKsj1{^1_|01(GH^H=Kv`(QQ18sI|1Cgv7Ph(&gxl z1m!+1EiCo}5p^v%a@trmH>qr^!L8}GwL-&%jz`bHeD4dCO z!rQPQixk8-7JvQ~xX4}2L#)D^I>!oTe^7amHeE3kxLIdldpL0m8Q5N702i|R#N!;z zvx7K8X5~^aNps|qJZTKr0yyOiq?N|fV!+<4`jKq<>6(ZWe=+W7rkXBixdi`UQ5die z+*RQw2R^f*ze*}&TCFqyhfEs0xT@J36>bVCu$LX+*l%AxlM#pOE?ctc9zmIq55dmg;f1s zNs5TvQ$ex>G54bU1o@#1*LZ)a&pySONqbsti|X^bH3w)7XtYlyB(+SNmC#uMi}2h$ z%`wwCT6|Gv*XG;C&xsNBR1f_)Nzcxe$HFs-Yu>sjPQ$YtHf_fY z*mIQ-Q=}xa_JCHV7U8Ffg`cxl;X0~l)z5agX3Ojtayc=Ry3XpfWKHeAZttUyUC4X{ z)2g3CWG*+?D@^=7*Rh+;@j#u`AUy%ugLwR0EhF}M#HXM5B<{|kJ#{<}&v)eXAluvy z#1TqL^Bgrjd4#g$HRxSjx%P_i?u9wcSe{9Ix*BYJ0T~oEPJ@B)aIGNLnTHoTm(7g6 zktt`9n5>3bC0vA`AtYTHbO1>0Y^$BK_Kbt`B8N@;>U;(AP=YW;yPZvBitExCL0Ggg z9V;o)a;;EE%E_gLmkQ{}7iDcfyOEm1gr)?fG7#Wa)h`A&zaijnF-TMOqKSzeB6`=<;E`eALFLP!t z!?&kbn%&IR=jSs#HH{uOxH%L35Bmt_%*s8eC=1rQH=Kh-_<6$8VQX1!h;oXzucg{U zc)7E2O*1y)1%x^#W2lM%GPj}vp-BQ+p?u(yiCwT-+#}%?&O_Ra@p+L9xyk~pQj?O4dvn>p$+4SRz9ZV)M^ZAP?xKG)@hD4p z%{6kw!wModa+18o5!0$|xub@2mj{X3q4MiQWjutr4id@bGHZiF&FsH}~g||C&TAwXV7>2Jp%et#- zw1hf?P>1cjMymp%o&!`~kQ?6NU}+P<+1oa}s7T0dBy9pvRStibfMO%De`HJQJ6c-c z5#H(W=`*@~S9!c%9x_N(#Ce&OMTJ$K%Ji`!yo(%Bj%gk|x)nGZ&EyykC3efv5sySf z{}stqU01(PD6T6_>kjXBr0v8ul^wj)1W- zkOVzI&IH)|2!|i4V0~zfPmO6G%6$f zVqSUFDty4XxGwq5PGFlqtE2C~5Vf-9ztt1l@(i-Q40Z7~Dk}>AW3omU@?yZX_Ar{) zHV`oc5f&PvU4iq})Pu!XcdeJ&2(p^@Q~|_RVUIyAZ-hGaL2^aa-BSBC!{230_>l9J z&cCrWGAf^cfTk?d7$|a}B7B&vFkFMl#XW~)G(QzWfiQWFI5%lFSlR?queY0}M8rJn+lDp_9`hT>2cYs`1b-pR4 z_W;3tOtZn>QMYW}5yJ&Z2xic$iO%1&hN&+E30wF*MAwXz> z&`Cn*EkJ0Y_ue6t-}jw!&v|8L-d@T4{#bk7Xx{goci+9|o_p>&=QbD~XC+y~CtafI zfo9F(6Aab?*)busx%051UuiM(%Q*F*3<9N~<_*o`%^A33V`gOdlnaseZsdM_H6@~rv4N+D_cF3npWxl8snm#lh$Ws)uNL9V|i3kr!*F z3`e};*-l6|e4Bi1@EME|*8U7QsQYmDSR*vUcbv1j!P)KI;8NCJw1ah!aJ1358L45} zIFFjV_o#u0E^_?0(ORuF7U|bO$UQ<&uubz_O2_xXNoxrIXtPWBp37Ff=!>&m-b^iH zi9=;L60??~%#RZG-wofV1mdZWHSTwb`j3gure*n*nFdZ>H4X3+r`m>p9;|(AoLOGKzWFko z^tdT-pgO)BaQjmd({&;%6V{CkKXbZTjCJN7B@d9#f?BAo8KGOZwS9|hRv1F zl+&n%(|gx%kc=HYy?6ZvoZdTolsSYr$F?_$>zAa*-zhxlE2l$#GgD;)`W>0MgUfXbL-(*3)d|0M{uDLIThZ%d$Iamg z@D#qtTKEs-<2Xfn%;A+qvs63b6nv?Hq18rFA@@NkOHcy8U*OfH%PRcgI=~lm%}yG# zm_DqY;L7NP>$(6tA>P{KweK-wB~XdV)rf^Gphp%rShPVuO7P_MC>hs+&;Yc#vS}t< z-({+HwCi-0-5A@Yj3>+X{rYVdbd$ZDQz;2v&?zy|(Nx(DT#V{>V~Ry@S0Rc}f8m&=F25aoKo^6?%pg%AUPFc0vF~z;SCzQ0^{*~oXr)( zFmN*0jVXnVgiZ^`sbg^wZsL+8K6+utMVtW<>fi-vSR}g;HdpZ%+TcRO)3zJ zKDZ>tpcDZmV#lD&h_zwHJSTPJ(cR3&s`l)-%VAyrZLS_;ZuGLC1qLZDVgjiLi4 z7MEALw6czJ3m2=pzxo2JXFs@rC%r|NS6L^$CFwcwp|5mSu~l?D+{$UIgR-oDhG_D<*xH{J97}?hPX^2 zGf$N|WyTvb<^(;B>~uFW4VonZpnGs|BI11EK>&6*-6ctzarDmEcL#uqY!a?D)lCn#>Dlx63Xm*7Wwdp8kv?NE)#h;+j*<) zf1URIZXHa1$x27vwnTqV#8mJo!$t&DAz+}}wi#ekH=ILZ&XjI}U1pwTiJgpv>6LCX zW@2*mgqlmX2R|J=Tztv44yg*F@Kw0E=3sDPzf6Bn8#eaB&dRwIk}Jw-q*JwI5c|^N zjxJhSM1L`By%(^LopvYE%WSxU{6N$1cXr~fm2IN-h@6G4jUgW+itG+f5jIomcOjiD zqs&Kaly`N?ZIzURrxclE^4ufhV%}@!$9d#tOLIPhEeWTi9dj4XclN{ytsF>G#1nM@ z(=_OI7S=s^C&Jw*2-Ows0DJzsy98S*pI@7tcWR60Fl1cr=4tGVcR-?X*6tUOlCN3P zRBcgyp>tMiW>*H~d{XLR{R^f@ zh9(T-xaR(k+TXBr){*=6;Ets*nR~t5nDt?`-bJX6Vz$TbQ(dzJDRNAK2DKioydr86 zP!7IlcDT@3;6xKPI(c=Q`}3E3S_G1wyiY+3y=xO$PX#ONhIS`xcD`x@#vWGg(#DpL zqHIlt?bRqE^spE*M}M5gFl`|%LunYz)tJ!|Ytd}%go|8?G=E@Ed{`M#{eh9DrnVzq z{Z>jr*SL7CV9+w3gO2KWk5*?-5rZwh>0LPS`UKUa>xzjx`c8u zDgyi5op7m(u^wLyo^pCEBHk`~buYs8aW{*LcMa-Fj1`jD8-#jwqZ z6E(Oved7t3nQ}2ad9%YUiwQ;DIBveipJ!$UDhW^;_Bc-lRL`XbyNB&AR+_HqpAVRa 
z6tdCw44s51h=oZe(>Nm(h8jmI`W{|54x=oLx(I0|U$LC8f{4>(wzPzknmi`k-OnCGM)y;xN*twd>h#svm16f`(FHR z@R%|^XIr3(WOJ#+-Snah_ee2si(7!o-3?=uuLj<@HnW{{1gc+yql&poiXoMkyO`D1 zris~SaW)WKq_m)Fr7kScuAr1$sI0WO+to6soKK9qUAVM`N}&Sh08p8T_bXvf_|iBf z!kaY?F|3)W0M^FO?ZI}HKk6bPi@}gP>>_^M z4U-g0hBKTI*}le<3sUWVSA(>}dsPPcXnjh6C;5+=F}k0GY<|&Fvi=TNIa{^)YcIU0 zqy$4CHG6{pioyis!5>cO?so8Jsbhg(st-D?cg6el@k(IwfYIzlh9&Czbi;i~EdEtV zCvo1Zows^kwujWrl-Q~TsA5p>{m92{h2>SG6}H0tou}IMVLW=9^5|z#6LD$1kB5Pv zf_`S}J%D_i^P)q5rN6S-F+9-e)8@R068Xh@ft6DpQubELGcdM6S&D(y9UcFNC=|V@ zh^~^!cgTinc#sQJy&I^`2fJV8S(x2m`(zC=oDK#bWmrwYl_)N!INwh@Rds5RHN8P8 znOz~upad+lOE9N@6U+K9fxu`5GHBu8p z4?AnMeV;k<2j@i!qIf!Kc*agRfk+RQ`WwSx%g1ZyPpnRi!IwWrY4BGzzQVlAlQzPu z%q}5_23MxU7D&x8HxpJ_gYLo3m^SA0Hht~-S!T)+K|{dR@_Zph+X^zGRi7ioL#ZFMmDHg&Yd>O`&cs3TC#B}4XxkR>^rCx z%0U+V1q+vhRdM;7LHW)(b>KI)XjyrdCER5V(3{Wd$ML!=~DC#Nr zp$=&DGl~jO1ddsjIEF#0`_I=neRcP>M=umgHWV2u>hO}v=G@svH31S*b7M}#jsOF) zL3+Hqf};r@=Cr%;gTdXV(18JvKisz$R>G9j=Gv*8TpJ@vqY-$>kg;QXOP5NLTKRB_ z88^ba41gKx99htVS0CY0RBFO4 zj5KvyHKQq{ki~gzr;uchgvU@AZoVQLP_m6tZ@>kpp5imZqW@O3BMSeHh>vYg`0s=H zvmQr&&aUvM(QEp6C$D}9899TKQGo%$!dW_^g2@QkBe>$Si-jk<;Rz(A#*&JA)WJEe z@kE!Py8Sc9Rw1;t52Vyak#c_2A0r~m$ORX`nHwXj5o(;%CQqVt^zF!k3q1;kP5SN>g(^Ws0vD*dquL9BeWUb!z~srZcOj=Ta<7*}s8B#Ls70d_ zDi=BTEEi&1QmYiql^!@D*{N0D@_S0prU0B_ZT|P3@f_z*X#~4n+rh33Oc-{Wh2cHp zFG$Xw!FR4U+E8N!s?I0;|Y08rV^946kSMbe!7Y@cmvR|uf1qvw1)%*JElM1g+*s{|=eq=HD}_E* zqauk`9u`nkCLkI6pdBO8lprX$Zo>0UI_+Whl6H~PtdMKl{z&ZVjcQ!_!}dq#|s*% z6aLo8s~22;2=XCKscBN*-{=@zTs|Y5n_7c!`U!AHQB(Bih&p z#-PGwWe%A@btKZpGiuZ^ppZ0SkSWC~fwMXV#-SWu>Y}9$Nl6Dd)(%JX-Sh<&I%G%* zom$>;Frush#knoS;g%%qAzekqxhK5LrKz@OtfTa}n9GQwsbEhDO{NiEPAND>Xz6xV zP-f9oTX=g%(LWe* z0280QKd|)kVKTx(AY=9#UQ3y1%VVJxH#=VEQlzaxX%96+9EOG>c4M}`A@-~6eB=sj z{0q79cgnJyCB&dO(WK$^E0)%{==bV{fVS?2H;|pP0}B1jt+4d| z8=W?7cCZ_j*sdhcHZJ&gN9de;{7s~#pV-|6GOZkLuph+Yes6X`sw)$y69QEMS+rB7 z7>p`vMrJK?)go6n{3`{*Um>lP%ZInP6xE|4bKB;Y5dLLcMqCJjQIEUMTp}?pr*{5s zIqbNG1TL0T#_au8%Eb{H-zb;d;on@Q>W#RHog8s=_17&wIk<{#K9}^di2G-hg{q!<-XJG5Ojv_u}~S?TM6%V?n(SI-O!0|HJV!$ z3^K~6D8arc0CjzQRVd3qAtki7xjeLK^M>#rF3_e*fpQjajB}Q}tIZpvV3XubZAQuz z@;44Iz(oZ=BC;MBskjb)8^mPcw6Xsza&HXpyO!J=vvQ@IQK%KAS!)vW)xge1ZD+F~ zb?Z=Bp~$-_y#HGAZpyNzHlsoK14b$;o4m!TH4_M@ClI>&80k$tj+ckIh&zBLPq?_*pj(|oAYpc zebh;-HI9!jzximKEI7ve%hudM1_8+Zp2JJ7&a)3!!kAD<1S_Owzl~^7GjcpFO)>Ec&?z<*g-@2it7`U;#|I(SWGV~ z7lXoWi!=t~3>!Y_LS2d9udN;oQ9FoT4aArE-AfcSkgcioanePZ1d4hZsBOXKPirqL ze9A?vUJn&q@+#jz&|(Yj)M*&yj<6Fb1dQ07xNelM!M}7@MKb&0yvcQS4qbHY_n=~jDO83 zt96pymd|*%@1k8xd+-1XBV(_I^Ws7)?)7!U*GbMEMW-b;Y?nj$hVxd>N!Dr2-5Jdh z2eO!xZ<2!1+){%!d@B*Y!oeg!G7GUR5ca=2OqfPfew$SE-o!E{rAimyaq4PqWlyFk zxKjPl7gTKR5viI|E^YB$N+XU`W^vdo{5=<uI|U>SIplh zx7eSUJrVLQ&I49RT52{r*e;a3p&z&?)hl^Z_v^~$_yAyWA53JV2yS=MZulWZqkEV8 z1zx8s{K$o=9$}_n3fm42BYqq$@7 z|Hvu!x@4!|AkwAAa`>%NR!;!d_F62V9i3cuMJP?~N3j=<3C)0EbT8r@Y+DLK{EkA< z%d1N@3=MgSG>?Dpl2n^kIyU|OsJQ&v@T9x%u++7juk8;Mfn&jXg5Qd&3)rXAh}Y_@ ztBPKbp&hJks|n2`iyWOjww>$S*^N(|ssj{(jOGym&#U68c-vF8x72&8_0JrS6O z5c0X>zjVX(NPGjxczA{D)^L62tnLcgq}itksX)ma-YFlm1uEYDsbr+vH#CkxX<#Eo zJ*KW6Z{Q+afnTky9`hhN7zZE=>%PV@=q0dnpE|#=ydmX6w>Tv>qswz6m#5nCXQQM1 z8bl|V_n!n)jX<@OY33V~`g-^mxZc7|oUVHOXK$b1c$x%&mB-qap58lnD}H*?-J zFLO9NbMeNQlv2WyW*k=|H+#yPlNo)q^;nI)17Rw*NjhvooJ zMwqnf#v1yS%|_^nx1>PgIP4;EW|hv*a4Q#O3nX2;^L4v%#|?h$YcW;fiL zL^O?ksSL|}X`$7|_+JgTamuur&p@=V+tpAGQYFYJP!NF<(E9GSmxmUcN1Kb`woY4Z zqh^hcd*#+ck^$FZ9G%MGpz@GYMu81aWXj3!y5V+|0DpM<%kauIn;z+Zdlw{ae0An| zT8uK6!3qpGl28hupy&a~xy+IxDCjuh4o+IFTitQrubK*gw02WfH5L2_UITdWPoogx zn!}XXT=ao=x{Fe+a!tSJ>Oh|#`Mo(PK}U)&zRHH(8KjlP0g}(0^h_sB35MeN53#{> 
zAc8uAFEX;TNOmR~+E2TiU6{k#vlwr3wv(r6_`>Eu6|(rd?0maXVeT7wat_7hfQ0F% z8E5ROh;8I%OQ^Yc)#}LX#-7!Rivhb=+coz^)_JkewqK!W0eMBGa!Y5!hK-@_q?^$KgQre&8Lit` zFBHPtuu+%uQWkClc2>5pcALm*r5B4qeCx=_;ezQ45&q`pSnsrH&ssWtsQ@PMI|!et zv<;;PT^}|&<4Hc~`dr1o4eN7f{SC>E8CM2=RQ0A3{Y7E(wWYsEJOx^CaKlBqgEh8p zZ+0Z}7ILzsEggX#0~983;uPG!#K~?dg+~yltiHhWk(=+TSf0i8`Fa zHkYCL#i#>-H)_*l5TYizIf^|1TVtX00J-QJmR9JJ}?_>AYO^L9k?AQx9pb z99cJvkg*0vpQE^uGK~x_LjUef2u%Xc5R4`gKzMXosDf38vJ9Qk%1}IlR0mLhbpCi2hCI8(4 zvdd+t_DZ@Xq;BK5C?$Up@Zfo9-vbDe)C1;xqH8Vjx?L<`n3%IdPm4(dd9iY95b)+k0Gy>LrEx&E!sPk{pOU=Dyyd zgS++~nA(eC1mvZCMI{H1x~iK_m~iUq*P&V?Q&P%b1QQWP1V2773-Lzdx*M*f2st$h zPg)b{=0k1=hkY*0NvqKwP<5s$Rn4hTkH8v)o!!1i2HTX&P`#DuBWKVWBEytr-OiX? zfPgSeag2O|8)L3Eg_G0Js{1Jocf~}9S@eV=K<(!#Q@FQjx)CK*hM;gomh z9iS}iqMi0iz;+ZD@_{-i#kHHQ`t@#J$KjiyX^KHSo3+-cP0ByjMoGlVd6-jC-hg7z zmcftb2O;_BCh3YNX6PPo4_Zo|K> zJ+=7#%IlX3CwAjb&u%GhG(n69Pz?N?GP}z3v6-R6F&Zj7(B;@wDMvQn_RC2t!-@^& zQ%bv~Iz9fXX5=V$K)J3@xmXhXK`u*mtVCWG^9{s>nN$R4wDu?f@t!ehKq@$KP zf)fH)GA%vr#EJGZ(yV4;zbirs`W1qnDdic`vd8oAPjnK=Bb{~5G=FCP0MLjbvvcfq zHF(WY9Sj`|ue(V?_Bxyv$kYQINUA|jhn=)q`xJIiN`Vw5%!3qvvf0uQ8APZe2(!6x zl%pWB)?_7zv=cdB3F9#$pz;Cbq!VD{7InsT!n{j(qVb0jL26zh#NomkMgd}Xnm8j8 z)*aRXOT5U2<^sv-enWnTKVo>W^HkRsEoZLokUCYz+mzKX z1aKrqW5K^dj@yC*Mhq82G)IPT4idX$CoDT(wH<3u8bOq?AELa>T}YoF>(*#XVjw(( z5^%kY@MtDW!!AK|T?kH{Hs0giN?m}GNtJ7woRQ5EMvKC%KhF+3Lq3rNnu4_OnUv@uE?!|$=snps@I8GYeVEqoZ zK54oKQ#WAkYs3yK6>1ky78=Q%L2-T*{)3bSQ>5Y1E=rod>T@Dh5m83Xq;TyMs?1_| z3`OAt-dGVaT!_cI5Q&{&rPWx`H&TKU_~HGBC{srKI1=LzN7gY+N{(bj06#|S|jUk2^8a0h_Q-ere?zvox3_iBjfIkeGnwivXZMT`k@=1 zL`K?Zv>vRfy)9>q?zKYBPaz?P`BLZTOpx@~P8&tQp6Z-wZ&+_v z-XfGd&ItU6fUUue+zn5&hlIn#yVTR2FvYQlJC!R3A==|I76_*gChJVkut$V;gX2mn zjA4r_{JArxJz_p_uLvc>6^|IvYm-wM`!mUoKes+`z7_qlo$xH@Pn&r9S{{ZbHqU_w z-3`wsD_<)W8jf#9SgIsJJjXe!RcT3)3mPj@=D^K~nWYsiDQ%D0o!c$?t@} zbow+uRNqG~qaM5~o|6U_&+{5>&p>a26oK)2x(Q6~nf6?Z#Y0iV|GAWrrWoEPwsGch z%mn@+JkJHIHbt_b-Mi_R2b2+xwNOSOAyp9VuP6WmK=Ju7Gp6XIVgcgj&~$UYade>_ z!t-64>Tp<_@Zg+j5QVq&nrnRuOdTpu{{%B>a5l8FZX;Ux3ob?!JUbSi!;e;QsAty$k7y) zUu~G2)Ws+tK#@4?^mJ02*e@xGjt_>*SWLvIksOy~S7 zoV8lTYdMZ2Dcvd+>B|e-!p^ zW16}1(fx`}@Q;*%T^k2}EQFcxDkrXf_1TiIPgO~Ql1AlXv#!ummmMVnNJvLUYXxyc z5@CyWd$rT1`E;YRrWCfFhm@hT2OQC*i$VcpX7(sf9vqrOp}*HSX|=&*jhsKRFk%SH zW&6i8ubDp7*BVPK#T8!XEY+_v(@_13c*Sp}S$E@vW~>aMnH!6%45kSGLRz{KajFh# zDM{r{i;N7fcM+;zs_gz|=koM{l2gAVdqdCUVR2*eT*8sBo9}Va1d-lAk<7S&0j365 zC%n<6Ni;5Gf3z8Wrc`Gcm>3ss%z^YxB&92Xzl%;bWTfbM&f(2YUER%mNJoThOqU^L zJ0ngQuqNU^*BYH#sU{9DP)!Hvy8s@H#W z&gy=Ub;aJ5i9$&eizi8t^J|m0+2cxlfS$0`32#rNw#V(wN9#l=8Nz-t!8^!C9oku$ zmVQD*l4)U`@J^?!c8(e)@6s3DgC*uFvU80A#^Q2r>F_QJaSnutFNxKGSSz0^E8V-s zIK>AH0=Upr8etRr1-hq-kd*O^H6(iKmITZ2Q4#$h?7L+U~@as%vH zox%4|Sc_s=ziI3G@LrdydQFv^?sE}WgwWhmMzL%~91;S&WqO?6A_UQE1S<>&zuO)D|Zxqqs4H0uZ{{*;0?To=JN&}`vT&dn}8cqwM>?Rge@ zzi5HWwDdQWB2CMDL>7VEBv+Q2p>GK)+}=1CzDw zq+KCJpR8sO#Io@mg5oyfYDAGY|wVG_4hdnz%?m%{cJV% zd1tRSoAq_`PJP!o3^9j_TYO(2)oGy07X^I?_W!fp#xFYYRY~ufaSsWVM++pfPP>wx z-E5ryFGb@xr%k!ofSPH9FS#t$uiak(rf|k^<2c%5+8h~}cwKi3F5FQbBhU?BCb9TC zuw;j&jl`m_IB&HHm+km^sQN>jW>FT@7m(lW+% zfT7wtLW6;H-SAyXLVYE(4eSE(XmrB&T#jl_inWSGi^U~h5lwf*IGuqY@5=Z-*~JpT zG$%6?ZEv`8e&F=gn?c!Z+3Q8~e^X3Z_6_r_vdrB^b9>aNQ8s|$Pa%#D}5Y*$e3y%;CZE!Z-gI{m0Ofv)dfx;)EbMamEk8YKy_P~0R1bx z6fotDkLy=4TeBN}YI68~vnZ7z3s(M_%dr!}u03oGM#rvQ4HZB|KWUvO4@~|!8* z^QQrrnP}IrzRat6Hh6~BiORvbS2yig$sBsTwyW7#?uK7eAT{4mNnuV#Go+S3{!0iQlLjnHD~5q3yAZBj=m1KR?Zh2Ivy7t zuIuzC8pRe7WO}yKp|kU#H`ceHG628_`bKD_&^v*i9Io$D{BeCFIgXNV#Qr7ojhsqh z>8hh~rmJ>z&KtO7)yl$OPmjaWpk(bl&iFPS(LC0InYDnF3g*?^kkq=jwH!2q^G42F zJr*)|UC(YKzYs7n5HjPkCSP{Ljme6?Q)XtR0)K8K-o)9f!x(HN&dlmC>_L(qk4?@( 
znS`5?*lcPfI5IKpX3ki>%`>>26-XxM=#HKgXz{2>xCUI~90fNgJw2oF4P&oA+`_4= zUvbvdy`E8pa?hw<<~^ggBq`&E7dBkn3b%5~_4sP-8IU(=3-goE$V#;(7CN`oqN#ps z(onsz6UVT1I{_rz##yVk*1e5k3N^9TE#~8tP(_#+w=QSG+YuwE7(ij@F+!u~!jz7o z-(&TDTbHJKD)c^v{aE-)#b447PzLy9E=cKs@MNn0b`%GH9%iA1c2Lr`hJRoC zee?}Fa5x#tTCxXK_)t24#Pqt(;e_Z80=>f>oUi)*+YS2Eb|qMsMJyBrAUtLl$?cKs z8&uJu?hmIsP4#?cP0U3z5`>hEnL>Fq(T$SBGp_L6a0Z$2cgi%hS_>Q3UL7Eu>D1N! zK%*}D{Cn`+(&DaOe5KuR7Wt`2ymWmhobANbquGeFTlKNCYnetTVsYM`j9Hg(;BGjF zw6EB56y4S3sD3rpC5WLC^g)yU?tw$?!$_M2SvQ(5$>)A39zb zm=N&o130smE+Dl{Gz>243}M4yh#^LuaG}#zt9Bm?`uX>KTqUM0Q=kJ;4>Nn=% zE4w}VS5bd}BD0gkXeq=GFQFjt!eUtOvZnRXc%KSdMw4!0eY6`kkd^Z=whxi>f(+Fm z1Qkny8D7>#XRh8)u)bpZk}?tZLlmmr?YWo?n^AScCQ8B%ixI{(wTuy=oNe%cmA>CAaKc zz$(KATevup+y_Y1%S8_|ki@>`mK{A1+U>jH64G%w(M57WliQi$QYTE?nvy%Ubsa>M zFSA-X(08*mKs9^?}>6tidGqLJ*4dpc{{TkUIBMKCcx_6l%qr@9>PMP9DhB#X^l zEcbTOYPYoYXSrD)HIJj40ZhKjEXezi5Pvw)eL+uB6opG&v&~u4>>Wc;V!KkE3p|~2 zSwuDJJa>zt4gsa0Imz$f4DW{RPM-G24yNpeV|$MyB%rH;E?@M2YuQn!sU9-wyoxs~ zMG;Aylm`HgSPGQ7V7-~Fk#`t7IzIznW7>4s>Fn3iQIcWWyn@7A=Rr9E3cz_~d39!S z8nqN-#GEUuU5S!d4^td%hyj=~DeR0ySzC-z6e_JH9E-5f2$wr^^}4~Bm!d7Nx66Dj z9tc6hc38N1tE1Qf)TY}n(> z7vu2<&)wSBc(b-|5k_KD#tO(6)^PZy^JvDU3WavqOKI5No%T|*jiLZ91?T+1QdoQI zZ7PX=mUwDaTi9dXtNCVd%T1yLbaN^93a%tI*D?sMumQKv>8l&T=n<;{k4B_-7(vbF z8%&alesRob4ID3|QSf2P+0s@{%d2v~(iNa&u8Z~uQu>V!DP(A4*i6Xq>x8SEF3l`1 ztq}8Aa+1gPh3+xz%3B*Fn}l9)x=C_{gAbT2v@j*2o*x30x;kOgWU;&T+3kvD;F>Za z@k7?f?@Ln7$N0J-k#XT_C#-gN*_-t{ol$}0-Hx&>99QOO-TRSC)??U|nQkA)i3)Jw zU?&$D4EJ}^v^lEp{!fM{+k&GGZ(*=vqQ`hWG0Nj_0bDB zqEf~+cww*QD;=^XP%lEk&U}#bR zb?f71H|is2*Mf^VGvao`49R4IXoMpx%VE}u(i9c~ZE#y`S@|pqYtp7eWTDHq`H~T_ zQb&{2^n&Db?+!bE^(2i)(s_Sf|A6NlrtBx>bMLqWJknf+GiDYYevV>rA=;S-lpiG< zyePWmyO*+w7CRGM%E5O=1N{Kk&oG1vHlkm zl&Q|mLh6K;)9*}b@IDbyJ)mN7Gc}pb?o`-&r!*XBOY=TsE<1m9l|&ogOY`^?yT79d z==iQofj`8U*gVlG9-OIqZQ~8y*O{(g%KXJGQ_fb=;Vx=)47UqbX|_>5pO$`<&udTw;sF@ZK$REHg3JtXUteN(M|&rc{GUy%7J3gUZj&a zt5hD2x-e<;Xph4=!;+VfRXs<3zHzDz0YgWO*Kqs79~VBM0bI!@x4zM9d`R)CWQ#}4cjO1wZV{+pFEd?-c8z4lAO z!<_vjU;8DpUlL#YC6{XO0uHl;dx-JAXr+gf^i(_>GP9Xt?~K)&C41f;hlv15?Ad;) zcaw9MP^rT{6bmlzI(a08;4GsSMVC2D;ZZJ1_1NPkucViv@^btX$9fY1_p<^fRbeDt zLCgr8I-~gowJAxLqFluVguD8Xhfm??&eH&XRSk?D4UbNg(DV>ZQCq^4!Gs}hX^Mi- zVvLU=ImaNJ2h89%)9SHKow7=gH*2#Lk)(s{S8&sXq6N6k#}wMfQ34ttI%>n?ow@q8 zWIcqjWaWy83MNXQuLXrM^82VyASZo35}(&tWRPelJkgn}C)kYxDW?v%KN=g_U$jM zbypBSaA9p4m2KsSaD0@4p%>PU2ON$HchFNPC2bN;SFGS1y@%ncE^M`{*XOLhZpR#= z#-p(+^oVW`!8M00>u4c9jdE};qLEriyYqyQ@N^d?%@7v|AZSPJ1r)8K@n3^Nx)Ikb zsc~r3XHYVZ9q3sk#6~w0^8H<^l(ECNhf${@6_LbB+9Tj`=oDN`&m=!b4xQ<+x1Qy^ zX`z8-A3QMRao~05V%L{K&$j1ePrzW4BYmLDpW}Sht?wN{?3D6mWd?Zg0zAv`B8(SC zBpij$5|KdM0}*|t_vMWn6at`R?4t}up$)0)I^_SmXw{pBxrnqThJ8mI~iXPjo zG>CDYMJCDY3Wv6pzI!fZp;_BroJChy#lA$;2Pj9@=CbN}F4PtH*-6axgOH`+O1efl zB@xGOv&A)v15j96Bk0CxH`DshcL`G#)OxSBRslzKpCGSe?d(q0m;Rc>ob0T55l(Xi zTc4fq0_Uz?^d-D9!xRs(y|ZKUtP5iQ)D17BB)5em`UY9-0b|(QSTAyEc2&MBn|kvg z+9LUH3^ty=E%hRF%eRKT-*M?ea413mlnS6+bR2v9Et1c;6KyZ)Z(X#s)lpvu4CcE$ zs0^ZU{Vv;{eKAF0mu@wW_3zRzaam5XOAkU`qf7g3S(pAh3d?x{@wr;v*g}>%;iWF- zHvC}l$g{UutYuV&6BcZh2@6(M_%+55M!?@^B8Y<$6JIWyVF1;j3ee zyE}#&C6T2@?(1%NIVGV#BL?OV#a!Yc97-Nuktj+w(x{{HxR6czb{|yfLGk-Tp1J%F zl!Jy=G)JM^5v}@47od9L$OOR5Q7TH22Nd0`IozH_PNsOU4Qdon3~C)3Y>2SR2Ggrt zlxl0!dPY8jzYHb}qmdy~dH+OmDjF_xTU@ze7V**iY8Rl|nlb@;7Q1Cckzx;MsQ5A| z{+UuR;s?D5n_;oEHEkC5-SC&%sH?gOTqR5U5E4$G0C%z=r3U=PGGbUM>G=*(2>>s^F2)rvzNSFxi8 zLn?+8_5l@9BmfL#+i~QX+f{F%DC`)=XOZsQ;?$Ijo_(XsQawn#W6UB>H+rG^0ZPBv zA6YUX-b5j&HhE7;gg3hg+wg<6)g~YcA{61IM@+~=X7v9`dR0{JwDQ;S7U!>CkXaK@ zZJUDXL6Z>j0hVMY&s!-E*JfCjt#C^@{F@6=9rU1Hce^c)j0hs@P+ys$GsH6)-eznt 
z&RxdXo*EDp`vkK2y0sAle}x)1kK2Q=$`?TW~>R^9v366 zTO)GC7hME}Tu&s2kb!LgjrsP>QaZ_*llX(xKkkZq0ZdNBo07;5*9~q=Uhpx}$&QP; zKK*%nt)9wiqTP(Nqd~*5a5y&z{T=RKYTF)0STIej)~1^~H&%y>l2KPw75^6G9|3-6 zVU`{7#1iiRO}BJpaHK2`=L%F)66RLKa3*U-!!9^Pu0_(Qs_BV!||J#9rC&=KW!q_B0=W}F>t%f<|L(n+z&kt-O@ zQ;3T(JrPeKk)h7b#r^5_&WO39hDHy}O?(y$;|>hgbe6MXU$S1PEFtx?RV2Sn1y`}~MwYqx ze%j=i*r4nhr3sW9St>3zwnX(IGJpk{;(-vHCeniFB?ClbuEr>AYq9 zbSLM;%-!;^y|lWJD0e31IJ`2C*0J#WhQ-q6S&hwR?&91n_~VtkFPB*%=!as%;>yci znHReC5vPHJLAbJVH^;6&Ahw!BTbdL$n84(q|Njg0g)AQN?G3BtBmln@iMJdlqw?*A+QrDxn zn+XFOhBvDwJb$_Y7r#}MX=m=h69->WRwO7_ZRW}xgg6kv7vy*{4gCFS#5y{?aHepkC#>4@91!clhRl_$BJo`m zXDyjounBIOclB5;!E;ti%Jb4)pe|dlMY-_ajkM^>v9-#f_!w$Py-Rg2%h|+j3jURz zi^nX|p_d{Z0d@-j3t?}wvvg8Q4p?)1hzUr!Pi01Iv=^Ek28QvQC)`+_XD+#grzNLt z$Dgj;Zq(BaiP;w!`rQCPxmjjz=)2O~wbRFb_%;U8Zqh4Cdpt9-ML=m;E|eG}tW4}> zCTJ6sH8#wjXLe67rhU$hjiO_g=!%p``$;+yr1N_1Su8~_z~9jCr}G^Yt339!m2N;s zlzWlFHC;?t%jOT{qtS28;k?&|?Osjyc4W*SSV+DJV`|6cT3Px5U2YBP=Hv#WQAnoxoEOKG+{!WTH#m9`Owe`UVkU)<{f6hi4&Lug@ zbf?Zs4|GzjOhqEU%n>=2=|LpY9_OjZHU}w(g+17DF;ijSROOnGKo22^&QpXrz%g%e zv^bFqlf{RwkyMM57!VTaVJgz?n>cF=L3ic9>ERBFEoPyovghp;4v7i&2o(!Ke=V%# zw#^b@VBsU37W-Zx|ftKc@0!CB3)Dk;4gfnrYp99S*GJ&KOMHal!cc>G4jBxkQSFy|^)YVm^V) zVJ8e+;c&t{ag9Wu{~6O$m<^hqh)*K%+9>6)cLP>54hJZ~tJ|A<@5o!-(~~_Hcf&KQ zcU!}%mYLySjzuYhPthys?QhLbCD@hC>Cyo_)iV>bD2sySVMI;5r;&Fg3a7IiQMYGi z($k$48wd5Y!IUs5fu2E7aS34n!63-PN1o}NHM@jdM1*INz@8H$RSp51$rqA8+aa-@ zlh0d0hJ*xq4neH8We)+S`Pfg`@0^dT?;I8TM&oxO6gBalM;=X2x}7X!>G_UXvt?)k zQhouIIr+vlj{PMDq_DniW8!q|2ijg%g3}9~ci_plY(tmJmG6M!%FBzG7uto@B+rIi zS{w4mrx$xh4s64?#tL455oP7#CCr6YQ%wG8lH%mAHfIZ(S)fiLUNXJT z2{8|%9yf>*Sxczb6UAY8x%zo^4WI}-D;TL;@ntk(DMdWqWo8|w+dDc zYg}H$H`t49U2k((?0b$m>`_``y`3yp9h`e>JsBc*aEMTPhofTCl-jYmH5pOfsS;^3 zI_7|kI!*6#OzidP`TWhwHuoYX*t-d16|L47`g?S9{CgZ2^Ws+;=7_d)Z#n$BqNwjB zm7Y!S+A*1imlel&HqhU}3-&%oA2=k>1y&dv24%&6KlzMRx228r0f)s57kM^{Wnm(c z{vf5z8IdnlM&sk%)#*ddh;@?1yI>JHGRT~ zF`tV)UEXF*N0d*J!q2w3+1k2e;*9pv&Q5DrKYhwEv8frxD=i8RDhc#yf;h#*pP})$ z_D-kIIAcA2zj}8pfGbG*v(z4*g5juDOu;_qjM${l%^Z7Wp28H&^hEqTiL`wDFLWTm z7aTYlE0j#ML12*+?~CNI<0ys_Zhge3I(5F;?{A$tkDcU~oEuvb8Dnvch>HGY(rMMY zNo;FcW@7K`?yv@*zTz1;uvNQ?c|EGETzr+eFgF%924m!J&EaIN{*s*__ce#d2EBnz z#)DeIeVsUt5QA|*$zpuJ;k;P2S~(EN^u+uonT*XVy&!D&PWqO!qAeb4*t2EwmpfzX zi1F=F#!ULo|Aa9k#&^kJV~4M+1X6y_2{A*KPg;|335kE7;x=^Wy=J{;C*fFy#U-9; z`UA(rK8#k4Yb(fgJu!bsCbyegRKbrN5?hfO8aJsS;U81@NC-1BzO%%!4({gk6DP#F z-}1D*umL4;eoD^K;M514DRgQ$T9@v~=6w2@GdJPyR_~7MN(Qd~VnAA%_&GDd!A8zP z%+q;3`7fLtvk~+-Kbn5dQX>75B=!|@yWathu>IK|_A95ws)!`4j7n5`LjIb_F;jn> z{n=S+*ftVuuygi-JIU3|JDd8~m6eO%G8adp;MM5~SUpd_b8gI1ALt+RAqSha5JOZRF zas9<%v4%Lb#8oMY^jDH>!98F1z@ENkZWCUX@i)iCg1*Kqf)5Ca`gc;<{PW@_4nXYV z*6;?;uW975CjGV_LlIB^(;76E+K3;Z#ftig-y9OAI9P)20{y5NE{&iO%jKo>h84s%NEHP$8UMb>|jHHAH#SgVNo&^RwY9pS(WR7?C)vm;5N zixO@N8-UWKoUjhRUcKYMgH{p=DF2tH^1z&`-0Gb!lhA zX*j`bshtg;D>*CHilkvUC2E3#9!02CxID|4x7f-nJ1zDC^YBwTznYLJN0Y*-Z&`z# zUS8-mupY_?lm>0}bc_>YQ{OQQyCb5aUxoBx*8-M6;99tfw6Ke4=`Nzf{tZk>`{-3wA4vg!#zW=w(;W8vmQf}6X15HwrBio4^Q!F{urrVWv zC2hRg-ONh1l`=CkGcz+YGcz+Y|L(an=RVlC+N&h}{n18G@;jgNXyDGB8S^kDcWCst zH?BQ7l0a97!<9D2;!qFbw{fWF2(#d9jzs~z2`LC+Vx-*^G`!B779Kk5u{Wjm^^Y{5 z@4hDu1y<;hc@&tK2$+p9Xn<4NN0?}Pv{8L8S*D`k>@X)v^v!@iTF~~GYS{yk%B7yx z#l*T`GFQ)>OkYONyo{i)Tfi3vR5S-p8U$$$4IYE;e{)N-;;YT_&D9Z_WZw$xT*OOz zJaIZhdnPo(O1QPbeNUt!;x!~n`fWf*(Ka7+U-y0C+pMCs}OMv5C;f? 
z6V?|bO;swOnn8S>;i~!SQHrE>AZdG~H)!ui=9gxtOL^1{#~RHy)FFEvCkef<>`)WR za!F^$0g67NB)HPtep?swT zWb~4%^@u$YSacMn`KP<(N(4nqrIU>3D+YPKGI@~12)i9%NHUfJQYXuwY!F{Jz7%*$ zGL%0B@@NHX#;}Z`9CvIu)d;>JAw5;EArqng?V+#tiZSlG%zKoY^9}~_4bzOQIiW_{ z9eFj{RGO8o1onhG8OV1vrB;@TQ2)-*N5fYybRZFr2~+p=7)tj_B&fZC-sMFYtSrig zP+ z+JhAhY84{obWqYjQhOku#Y#BC5XZ|^j&22U=plEjq5`$I@CxjW>$c{@T}<9rBHPLF zdGO^IDRRyPCl}mWYRjjIb&kSTWBH1aiicuiq-_HYEt?piL8XVnE2Tz;?S}Ma&SWa_ zh0Bg}s$}l~8%>D({>D3lEEX)ycTD5nX>?zb{Hhx#=Y(nCEEvGcpX$kd3%W)2F2nj> z{)$>QDv>h<4k}qxgt;A9q^T694d$z4GYNfzvP1lY+J?~Ggl01U@Mn!coo&zwaW(#< zu#iv|9+H=mawMJuVv3x!_R9=JdArXwlCK=heSS%T>gPfI%D9OgS!1si&Nt;_UDuFH zFp<-#dRM5%erg&4KW*!o;clkwt4k{e(nW@h3&1!)Ofs0DiPVE|ccb{qj*8MR{>s91 zb&zr|v}|#UbqDWZpiBHhM zNs6Q%NVu>plD%sy+L(PqO~_xz-^c8Hy|C<1<%qioIE{eB)he+5Kuy+GZ^>v;2!{0a zeDedd3eEo_i4r^j7$Ze^sbx)>`l4(GB3*x3mOyq~k&)s-STejXgR}5?R7w)Q47j$7 z;^3HkvYHPP!d_!K+eJ}8uQo*;OpLUBpxs0+m9tW02WDA!CDb`@Ezqk<^e_XZn96SlvNlbqxU;UL_{u)ctGrl8#HFbNWQUm#26uLa zyU})i#jCAjFW00=^8Gu0DiA$2X5*}eZ-;0_lC^0)sfyx}aKkl<~K(dVfu3M7D61IK4a?UeG3&yWNxw<;NJ)*Cx-k_T_Y{ zJ{GDs7S(Leo%}{VlfA`7q;oyaVCS!Tf$Z65YR(;e5{R}Q4_il!w3Y4m6P{qsF8JR# z%UCq`M3_Tq-B@1goxWvjT%4uT*$@xIlgyH@{DRE9%%#_Nv09VpCj*UdfD=C2gS1IW zc#2UcTsMLWO67sc5%^R95k9s;c77>5%}~BRj-4C7Zw6xKtq1UQP_7^p%#V78sr$OI zrQm9AUeq%oi+U!tJW>KX2F#G;^DG1T!U6eFlS)P2v}c2hHmbDmWQK+`;0Alz{Vj9h zIY#uAi*~-=I6I1Jq&*ij1Vquk+t=vq3C}Z(FAi5|7(#*4&xiD3BAuG`(pi$l6?e@FHXS+Ppaly@$EuBuVOvLDi^fBcMWv zi_W~nc)nL%Hj&kn6~+U3|1HG)QecW7R4cq}jg(Ozl!aPe4hn)3_AfEG9bRD^UwXEk zUHovN#oP8ukkDjCe>vSvR?}}Zf<&^XJv5!x!Bhk)cLA#j(?Ut8pd$1m5KfJ+E6Y}Tr?Ku;OuO$~PO>*SD5j-7c)}>u$Wfr@9pEb;RZ*)^lr@HT_^9$-3KudAR$&fCF3Jz(FF6hG1x2=6eGuO6`Tm1Rd!jkI@yhA&Z8wrHJn zLUED)cNx()44X8RSD{DdyU9$0`lq*+wn}ZX2^DO3&l;H(i2->%GT#fPKE0IaDIcE< zV7$+0zE`9$(2RUhl_BK)fS{oznJU@?s>u&A&+Y>T^4&^P(3@#T)FbhOAfi=i%SY?# z(>w|vGOVxf$P;6;(vAwEWPccJO(gF&7n-5AZ$Vztj~LLG8aIgN#?K+sA>*T9pfr<7 zma-Nrrdp=8C_ZK&U#%1clvCCs?Bjr?;SmWwXkzIn4C8B8=E{&HLG@2U9k{af zPno>${j1t^^SEP)_%smL6GRq8%8lw_8I{i%&pBYHg}P?i=&c(iOb4HZ1GK`J(=9`V zD<`bS|A)^R+xH1jLcu)9^oaaCkm;!{$C1A91w*a*sU-=je~}l22&%O2x}CagR6ehR zbYC*cgk16X3N=aS^$g3A_+=0e5W)Q1aG4tZ6{Gp0;-jX9&n}SkRgmy;r5(;TPi~0| zT!pV0%NMY%*doouNc%cysMokSiwq*m>K3i=jWxn}zTQV3Br(Fi2^a$5sgc%ibX%R; zOiz}pUX;93ISAh}>^Av#*%qjmGC?;HMt*My-!Tim(%EDb zZ$|}DvcC&9(vKv~L(`AGXE0w;Q|cin8Onbj@~BVKO_0D=zahKb8N}BbN@wS9XH+8R zhv1}ykvQHQeq;<^M>SU@M+IvC7}__KYe*lX+(#KG#cGqSrlq3&#Av>O(u$9~N|zpf z3J++1rEWpmjb>lFDPv1NGq$h9tmrG}^~n4=m?$BjlnBmBwC_=05=Q4>7C009+={WGwL zyY%-pkzml@C%G=+FNX7__~K+m_rMR-ZBC&r5R=a=c zhPDTb$a@I?Frx3p&q^a8{KtU4zHKg7XjqNR|ALujJN1{tylknrjuZRVi**y3$PGY&T;e9=dOi5qN>@c=sOqdp~0t*!AxWBIB2f1C!&0UKco>6^Gw~D-)k|fF32U)uo^X-`FT)$}02{$mHum4iqRUJ`j$PjWvLQ)Ji z#Axn~4C3p*R77*55;->p2PI4~mKJVeAYW~6r^ok%yUl4}j;zDMIzm{MF;JV8CPp7E zF|0?Zl?D#N5k}uB|9tc-V?rZR531$q=ce=%M>AzR@t&lg$*R!uA6dPiHCSLU?@06G z8zWFby%Xc4>E$SR!7xQWm3_lt&rVr{Nj7k6hNI1buO3@9l|3g+12=;KbY#;vF0;FD zZb)BMR?%4K+6#RP=pR*h_{@aU@SW~Zk$w|yY2>L(_54Q?c-6OpD}2j~@&PW$KC+mr zI|#QnzOR1e+4{(Npi%_g2B4c4Eorf{tBCRV*_8{!5#>}(WF{z zk+TraFzJ(A(xrS{6`}qX>d!Z47He?^#9h{?ucyYlZz9w`llszcO7%zBYWluso9l8V z394^{I-ae)Rc$G=wwu1M#qgT!oI8@xGuK$lTMkjj$18|)i_9V=Pgc3id- zi&IAGDP8Vt^WbZiR-{8z3e&|oaB&N9AHl>1Ny3PIbT^N9da7rAUvEtL|G7`K3HgVI_M#`!i>>;(e{d- zQc;oIS#VK^rBGt zKY7@pYfN8@o#!i&2T6>u9$>i2Y=XGV#oRlyf&fbB8`1a4S5Fbo1WCP!RLd`rfmX@V zt>YC4!Kl7xY9e}}c9fGOcK|LT z3{wc-VK1}xKx$x+~DF zXy5dgyRz!|#Rj>I+BYgo_C3KyJ5-`RQr$?s(U5rjy^QH=r_0Iuyz(L>+#3Ych$;PO zhYUM!l$JbFPs4o-{9+hM$e}BjyDDqgt7iD)n zezxHOhVex;3Tpu8ga~>dKsOc8zQMdKBRy9e$SbNNvOyvOvO>G;XN$5yq5}^y^yA7fuwxlRT(nCQ)_pvRLLu6qHY>@ph 
zWBB?lrKgxF5%O?A&~LHcUwDKuPLSV^ukCTB-b0h1_#+{%!*#IAigiS)YDcG>OU6xD4&?#nU(-XmgoBLI3xL5eH77~#)G^fv5yB< z2W8?Q-m;7hwYq!56Ab9CeP|RZwx}NObXDDB(h_m&w3NnN| z9gqWaugOwxZ@by*g=ZMXcSU?D3U5dfBkY-g;gNClS6AFvFZ0sFvkW&Oe;@w^Q$cY& zFgXIB4d8(y*k9rJgp|L6dye6JRaowPCFYKyN8WS6!wVu=Y&J_lSAlmGD;*&dl4iTZ^h>3BBa(P>M$6PFT(QT0k{f zrMrUqU7krdyv}I8UPU9G8{eQzhZpvGFi?BTJIZ*~8w}xVgyb^dk_6S?2=yDPdZUR| zJ9La^K^9@@g%w#UvO6y=k~bOdOxG)89Ynn*JHp*7D$~WA;X?0YO59m&><@1-s_&C$ z(Hw7NCPc_v0l^!Ry~4N|5)&PSw;9X#hGY^-%MPhX=-UCU=({yKeVJqV4#QqXeK(aQ z`<-B4ul#KBkQtYL*BbfuD2E?xPL}+4^V%`Vyxo<^BXZ;F_?MD}^d4jThV=?F)yrx` zz86T;9I2L?*%RJp9AAD$MI3O(X2riB`uJR8t4dd7%k4vXobGY?0V8gce>uKf&uQs> z%^jDud7U4G4-9AbmsSwq+7mux(BoZDoTxM^Q2N7=Mvl$C*33-k?UVkamTPt79!a-6 ze8fmwT%??i0&~YGFR4rZN5MyVC+o&Zv>iK#hL0K2SKj58oJ$f^|2Wj~O{74YB+RB| zi;*+kla|IOjOEM!tBA-AiIV4K9kl zY;B@#vZc@#87K;$F|4oX8@VtM6$$+;plJBu%ask7WFO>Cy|q`ix%!+TeLdX5r=M3M z>hnZdqo|G{2gzIcf}tkF$@p*41oTz4qa;V(7r{eRtUX)nG^DAyAg$%_C1d&e)N-;? z@**UB8H59I^HN2I#kWx_e#IcZ&Jxd85)YCXVP6GI8^a0K*gN0vhp!pR*O8`r0i{v| zeH|dQO|2r)nb&L;gl`zp_wAXCBJHRkO7=Iw*1@SbQj6AzY~GmmeG1<)wC^*h7@{pI zlKX9N@vg)KjyPE&e8+IUccqvG5=BV(E(qxBrm9OXUqWKiw)vi6e3fiP<(H|E_kHm2 zx)fFi2tP2E@2eiU&Hy<-1P7&MR%%VP5P`;AlOGw)*OM4oY?&mnKL!?!(Kt;zW~Tha zaK2E;&=l`NZpu%AI1o=vdS-12`!CJLjn#f;7~j{;^Y#7bK@#Kj{2VZRbT)Qf(_B(a zqgo#L#zMlVl$@g=zA`e@Xk{_mlWniK<2 z$eyV;aRX%;(mxo)_stbXu<}Yo{Sl}%hP4L+&VMq(n#ZtkK6U>LUG#&pVcNJ03yR?I z7lZiTf=nd7_%_y?X%hTbfVFV!?P+x-u31}3KZL&-)fbuLiLutUBmO|*-$A^ATxwiq zK^s0&Vhw88=1*eA%Q9R3ABH|l{^9s$Nm)uSyZp;cpH}_}D`@xS+J;NL<=KT+GyKae z_zKXRgwk`zNs`omgQ_*&tcSm*wL9maSD} zr6Nt}lI759MFuTh0hUncF&m-TnpvI?S2PE{rX0^!G7nUWpeqr?3EcWkhAXcT#IqH| z1C=7^Dga$yt|*%$m!ETY?zXMrss=sVb$>G@=WIKH(9YFphig`xA)9e*Y|qWrWEU}6 z^H2I5S2sJps1B9l%@!#cgRKq8(!e!f041qxY$_um9jOpxT9Jg5HM8TI#`ZO1DH7Qg zio{+ESVUH&wIco7dD*V<0Au+=4!QJ(s6g!lp^ak71|M`pLnn+KWDwr~n@Q*u*^vp7 zc`&ahw$x+*IUHgrUkNqT#uE|B9}4-b5h=kwiK855fJ@kjR2d?!O+;$6H+%cK;W~!! 
z1)nP#{Lmrex?miHr_MC_dVPN{$V+}b15L=E$JbycpwGpQk{o&02M@(p8kCi!d0G1+ z+`wSIW=_SBZd4-YhTz~MLTCXsbbVG07$w85+W^&CVXzLoMs5uktTFWZJ(C>>mjG#c*o4tvU03Pr37> zsiGcv$AE`YBb6`l9d8e`*AZ$4^;P=?5ycjMlVr(W2R2@b!cxHDScCfBbvs*uc7$bx zNjeTB1mLB5B4~#7hQZr1{`<{U^+|&2$3q>>xs)SIQzMBex=N7L+kuKiCHRvxcVwqI>em9i_7m%-NRnT((4VU8q7EBpBK?9%>RO_MBg50yqq$dcg7Ok!X1q1 z>~*BGl~<4==#Bs(h&!}|A+{q9cQT;wA~Tiv0%gZ(#YL!+eP^&!XhiFo38Sq!G?K4d zHb56m#n`eAx?<&NrtOO!p5D(aYp)MmFQxW+YHx!!8h$B%M#^C6=wS{{*lrA8dnKQNGo(h|4)8D>t!bj@ zOYJm_ufizhm*#p>XF(ewWEP%p^3F@9j06Sa7#e-b+!HS2hJYD#J7w zAj7s{+JL@SY^a}z)&&8(0l=e@3MdT)$Oo)zJ)Uix3Hkf@N|y?HT|8KtO>zXD!z)5@ z({JqSN_P0UM)Ezad}$LUsD2*QG1!1^P#oZHOBF8%bB&I4fzLOR?_p&!v9PIBCH$_0 zS7lH%jth4)xUYqllTmu^*b*}u$B}*k=m*KAH(K$+WgP<CW?PDT*1R`L3*D)HG8gZx%c>>n%!< zX4*B^NVD_x+UzK*k=6q33PM|u;ix$y_&$Wv3<8m$>^x-A-DkG4X2ZudloyQR>z+_T zFM|h}9+7PzHFKs@>q?^*3rSvRDBlI;#+j1@)%QRh!y2Wvcg|}Kw)ZA6|DqE*M%#uP zy0p|EoVsqE#E+%=T#-g^x7G{u>(;O1B$IK=@8q;;V-YqmC~S{Yiikz+Rfo_uzAyS| z8pRhRQzW$qDqhLdMVpaa2N{QMg}xDepIbU#c_l5fE&?m{zhVL155X|L$2e3#TkmcF zb+n*Uk7Zx0J+s`gKG2eZe0Q*-4^&j-RV{;yR;Udl$5Ci09+!GEyENDlfY)3`NJ4)WQ9cY3%Y;AUPPpzycD}yp*`dl2 zcW>Yh62a7bZx8m$94sXK-N$&&Jc!{*1tvz?eL>@QB`bGh2;+XnS^IYtC8&OXsG~Y= zFU}*TA@lz5GCaTlzAiA&PDOoAil7G)q`M(c)wfJd?U+0>Jjg)KV1l$UQ`7`MQHPWV zgL05u4thS4@h(k8B;Lu~;fEN=SLIV67O^EY;vPy|dK08Pmq1`&V(||%+$DY!OpLUL zgNA@iqDiMz2TvYhEZ@UQr|Xp!rAT@tNN5E#W@c**v;`ZpON&T?X@*A`%~y3mLQ%t7 z#621~RE|<5_Vq2)igB^uY=`g|qMtVrr(K|M^U19?LbGQ@vv{eZ_Ac0&Hi z_<~eL6j>!l=HtP{=XNm)DQ4Y=Cm78473kRtE9j8&L{Ly!T0@~;$Mwmso-G*zdy>(7 zX)C6pL_5lBlKfdhI(mI}R8%GX*`TA#RA{ZsI-kAp9OF*N-^YIsR8TArwDAu)0-wvv zl_Ey6Lxv_Mgy$J{^_Byh<~bcD=Z>xKJGiVyKO;RS0nkO{>X zY5{Q@B)$}Nc>`Vu0|-IIw_TQ}e31ctk-F0N(zg)Jiy@8A#Ue)%QenUt#o#Rq1@qOh9jg9o6e9N8T&J(;7y$dzXFbqzsN}q#E^ql`(zeHml{3 z<^%q(1{Gzn+$u~(S(*v2F_f=-!kKzyCc#U3EyNEJainBw;t*FQd7UwQqr8Qj&b$&) zuLlaXm@Pj%CyNa&?Q6C6G$lmv2IKi^F;meCv!lEw$!`R?*rT!6YH^Rw&3u#5S1*@v z|B;I~827qHT{?I(9H54dH8!+xdDWb`;e}dnah9JX5?*lOU0mT#x%vg?Aa% z`Od`1VSHoa&t*-r-wn2nB_Q7Z9)tPrVreWP7i@now9y=w+DtNWYqIc$tPmxuSj*CQ z?=#AT{DaZuCQGg)p_eo)$E$fih#cIL@>zFB{D7g>KDd`OsQ*Fex?PL?~F&%#7kR|inSM~r+o*KN%7oVl2WqP360nig;Q#r)fY z{mu5gEW-3LGqidE!{?eVf7E*Z)Fu7npyPFwZ&x-GmCX}BVZimSJCOV6Re;K$gz`Zm zk&UpXp5v#C;QQt&pzkUVWD^=jOFVS%hhPSKv_J{-~mCO1qh!6 zI;gW!`^o>p=M3YU?9Fo(!vhf_`bT}CL1*6`@^{A=`Lx+Xw;)`_A z>CMY-gVJ`5+poyf&;C}a&}2e_Y;64{bFs_iq9mrbp!B-}fNs7FH)us!Z6+zpaWF(W zvtKb6zN{M)iBCGq4>d`0zY1VF;jXs2nxKig_|!Z+5a zUam4kgX-Ue`az9eL@)4~)3_gfo`NnToz@*_eVzbWp?L^WD_Cb#~|PnGP9$ z1Owl6IbrwgMrT>}2sGMO*Uc-dD7O4dm8O9|!vNkxq(RD8sl`C}i*bFqIE5Tasu1y4 zAW|1j7KW94oM!l&0eqd0T+xvvsQ!1Tqos&Qr55l1Fm2y!P%7T3L%=@)z!!va9V0rM zw@rtC8N+#lhQ@_Ng@k{DfGA%a*j$i^9G97lKZpMq&i5V^g!CmS{6<%>jMDV*UtVQQ zNY%Y)!v7t|seZmYna)*UPKS)kiQMq~!bV@lz-DzBt@?6khszuEOt~fF3k*&vR4Na& z8(dPRjVr(gUU_YvG+Pq&l@YNk8unxtw$!#sGGtr{3=Q0JeyptZjxlfvC*n7QaAo89 zsx;1rje##aD#?<66<%YSpwMr~;)~&`#`Kl@6*C=Fh?J{=aIiHEBj z%Ga5yDE2co@~#0MI#aBJV#oiQ#`4vgJX_xw9;g&S*8&K|aw#ikb5Gg@Ask>p-?JN+ zkP~GIKM-)dpK0(j?st5UF@4XjViZ+{NI4jkgLC&%qQ6_EKP8uUh@mFr@8iEf-onED zG&uqfCD6J!(od2E5$PlyWxLV4CcF$(zHe&Q&H(> z6(Z%jprG-S?s&Gchin9BEZ?21cxX8}60Z*;vO#mhWf-YALy~u;dlPP8R9{cPWb_*C zC=P!lQL=9cc5Zpti`sFCjT;%$_YGEj?@5ZJ8-t`>tMv35eQZWI2sbg5vujoH0FxG3 zhm$1*^#1lzjl+-Oh&8g(`FbNuT4dc6EcC0TO*7Y&m$w^^G>UIbE!P@~3e-M|*MV?e zS_o?a%cUM|1mBC8ov)XY9nM0IxSIioViMuTW^b|9mz+2KFSe8nH#exS)lybbborOs zx2J(y@Oo{$q&?`g8vSrfLp!SV+}VU|6qJAR6YJkITDWp5nrFAX>BgW1nOY=i44b`te-3~k*8;eaE zp?0q}EdzC1cAO^j{%dI$s5_V)U%XI6^unve=;w~2sae@p8BWWB`!U6Z+&dZ97v(l7 zrQ6YvFg@HE9#G43p^#1cW$iIZflMoXP8i=eSV$Sg^Ps3n^ah|&&}rxZo$A^)tdp`C zKk3p!ry1Ng{F4bOK07Sx(!eAP+(HbX^p9l+=6K2wNKJl|3X}N!>(a4lhK=UuY&mOu 
zVbNcSYU;)bp}9>kcer4tG;Lx^cY>(ll}tZ!&CAVZYRY9QmS9qGVwVYZHike)r^C@v z!ic?msiUR(U8+r{|IYN9E3$^a46|Td-Ijd**1WwtXDYLoI=OG>1UT@J!d z1N)-hNkp%=N|4mEKt;|MTXmYpxLsyNZIi9I(z@8Y4DKsfsikxtR0z|=6igf`CI%NF z`TF?UWUt$8>ALo{_MjJcjgthly)yM?+Ps`4CyXywD5+j}Fj}9McEb`fLu3lLmTt8j zGQ(UZ_H3A(-m+u+R5;sw_|hs(M~&D~QI+&_Ku11N%1fEzf@(F{mbTSt4(j3DHS#NF zqz$W*e;)W~9{Il6SU_TP-hteraK2f)(DjHed0|lm)BRoH9?z~^r^;Q|tpV<427TRe z>gc260W0bxO85nUBYlzY{l?zxGS&m1miafeZcoC_>f!F@VVmnFQLgY!Rcq5q9ahj8 z)G6KTYdg`kJzmxY)HQ&@J3u+^!_Qp_@P(=@Cvq%|8glv<%jW(!Qz80iD6PhwNAT79OD z*Sa3&4C}kvitn@}N%B0%D7txr(mmPSXm@1o#TxeK3k!z#72VXTy4iVQnrOqsEyV;T zmc>Q;YLktQEQGi-9&JJzk#!x}KDYBfHKH93 z9#;DDB!neVuKJ)aEB8qaC`+K2w_Pr8nW8?j>~I8tmIknNupo*h9@Yw4G7G*w{^;^k z)W}-~?-=3vYF?QTv?w@_d5uG?|RtK3AS-+l0V17Y%gOfNReius;w4{7FnT~kV} zRtAYKEv&!-zOC88-kSUma}1=rrVXX8%=a0Di_L(qrh`y?O-3jZeNUj#W|xI#TIgif z;{RQ&@v?gv)>rhKNWAb0NpkNEF7A5F43fZ1+Wi$j#R-|=K1TPA+GbLU&knPnqB2d~ z7bcDt6IGED>C8(5zST_c!64kv>`cok*=qw>wR${QxI9rSVFKhe-nN08)Bl~Knc(P)L)d+knfas{Fh&QHH^czi$ z0nhaI%gBVRv?Xgf%u1pAI5V(QP8k2eSByWdB2Pz;ha;4PD6==VWza|-9pbT45-!T> zoLKhj3FgH4x#f%hYE9yw2s}PFU4^6;|6f8FvOlk^MfW5F`??(QbbSU@dW1e1Q2Af3 znY6Jr*N}K{Y|QNJN&3!8P111JA9w62b(!BFw_`9X)6BzD%+ZYRp;%F;uCjvA38dwx z!t#v;Sq^;2z+MNtm)9F%eh{8!_I#aUQ&V?#v?GRU3fgq?bUKM4Q)wOyWRcI>w)9^~ zGL<)I5T0R9eC^Akn%XJ;ssx~&XTr`Aa%18;hX@VSc6Vi~!)7b?oGeQGthGAQ1sGK@ z+R2V90d(|iI6~Aw3Qrwqm~I3u&zI4d3ePbczD5mI)B_I+k_0~&;DZG`Sm?B*EYbhh zw5sqtgZk>Nc&;Lc#E5%7aFS!U>WGGQIO;YAHV7{;H+4CEe97h>+Nh$@?h9cTai!|l zN+v=#7O|qpi_DyFh@&XC%8N?^4ZRqK@JTZ=B`yCf&?U{R_EKA_ftQ${wWS=WlU1c@ zfR!SKQ?2qUr7Dg2rn}yt4|naUA#Qh5*RH`FE59egXN|aV(vPG@(RQI zD&&z7e5jH4O7QR!uU;~vUY=d5cL%addfeGFyvpqOMszZOKAG&WswNob(eSHb_;4{i zC9M>Mi72CxbG15huOCMJDwp;tK zFnnyVyve}6j`CosnVaxhp-AeRLDddxjKOW5-dUgCI#nACIyP+Z7K8iR%5zaluR8r) zAxj5ug#)~J*_%5VBfPQ}$Wkt{^ft5MY`2bTB2~)M&f8%Jxg1zif5X%?2X?|c41cQp zb$sPgSd%{|M9@0{!cbB`8hvfoCj5U-<^s$6UDt;f-epMN>lT&tzK;|o{@uW9U)XL_ zx-Zg|!45(1F}UxkXOFK`4i#jHe=qRzzoFS2(mUe_W1OHUpP%lM*OQRpBY2}R!}#$6&aStrpxvl(Q3(b}@xHy+jfdd_X1(dMo-s=I)QzO= z55o3Q0*%dza^8dPaV}d>Xf{Onkl8%fWwWTMR*S!4-a`RHYafQSayg$ecRpfHE>$@n z)d>73fQXMVp1Hfrkkux(#j#9!+{Ec)X2DnAQB-dO4^|`4*2iJ%Ah8u%{ZQM5|C9YT zrCks{VHSKD!z!TsvJ_dL1nXA9T6Hu)<4LlqS7S*w?D&+K@VykKJa*6_kDSX$ z#m~c{elBq-nv~3m8Y6tctW3D9q;vJQq@QzgBz_UZ8zn(Bj0)pvY#V*a*uJnfZWP26 z&Vwpt+W0bT;IlS50MZPGfw~GhW+O+arSQ$GA zY_rZ~|N2E)R^)qT?6MvNE|F;S`>=_IdPTApzMlG=EG;QLm+%8~=IhT>Pj49ya+zRx zahmxd%)~NjQ9EmAOmEw|Q|D4-BW~eG=EPejMKN`P|BI#Furh7@7&g#|mw-rGuXZ!y zy|onemGRD>mIFkty`)6QLZrA{#<|klBRH?aQ>?7@OXY1Nc@aMSW0)HDqI4`WWq-^UjVRA4b#l;d^&6F|IM zG5)q*8?U)C@n1&uy<3z~Di6?X%`}PrH_&LXmdC#~Y&~bgc~kZ2^LB39SZChyf6R!l z!D^!F#E#MsTNQ%7{tI82XfU#z>~wnj>avHE=l5;J0r=OyIS+D{X1qZ#PGG$rZRxh4OcYqgzH}9 zc2=*DBlSw4B5f2AKrBlpD^=?1WRe`y>uOw3)VB!!l(TkT{ zuS@WFM)JpH?)g=X?JNunLFi#YGOq?E8Vu!^SjK$Omh|`5M|@LroYCGWdl2oAHHnWt+|92t623L?v04OC_BRRkmeE=^(q0PRTYA z5(PWNEbMYwpr&3w4^{!t)S)ni*D;sSyb}XKl0_r!`4A2>BfgqBr=wnS$9Y}yuMIxR z0r5QCr$9(qrlwuTJh(@1lNN8sC~4ZbE^h;G9p)a%%1*+^j8j>$!7TVjC#XXMT3?q$ z3BNwz7?wgaaVXXkm-)DX@qM49NvQ!lLIZ?Bq{6ZvH-ra%5%NCABugokHJ2&e$nX>5 zbB$lbkQ^`k#(<((#=A8cQzK&&=_bbX)p!&s-1594v4;bT`z=ezp|r2<(%CSw^RN6O zZAf#3!F{hir4-JCVO?6dDJ-CeV)4Q(I$84XbzJy$v@vY;B*xn6G*+;IPPnBR*yS>iDeBvh9gYCd(yd?#pF=Vp z8`%<-!7dTu*5<-FI#jw11vMgX17ua&Eb3G39NpHizUBxsW%bf|z_eMa5`PS@IHj3v zUhc;EmWr+!+SdfD$WRl{&4DLL!=&ww+HbAxyG#6sTx1ea-){9C8Ljq=EomnatzvoGUOLdGcy;so{Y(lZ2%0hXmt`+(Kle^M{>ks zEt2LdZZuE6Zg%UU$A8mpi+Q8QLx9 zjQ>`&`p48m6P6 z%y5hOe%s`yX-Nf;HDJ;zCWE>RaE9$>WtZ#LWQuAgI~)O^r5&(@CnhtLPV30@-_DT()xap2n-w(^>F@`kmb&?R0jfI~$h{-eo3yAz;er zo#8=Iljtd+(VQvt8&df_ZCr2pEh#|?txzLzH;5I1q-J|=F2xYfHtwYlBxSNRa1IQh 
zDQTZO%8sqlYmh7rSqy!trERcquDS4y5}Taf0XwP`rTDdE%pk6Jv>rnuQyP1JY->!!ssTY8Hgiv!ar>W@;yQX*V+_HW1j@rW1 zQgchUzbspXn}c2Q&&QXYIl*`rvef`80d!P{BYccmJ83ee`^MEE?qNQBjqOZEZDfae zRl*y9V?+jDEf(i*)$*Ijq@PoeE6kXchU=DOqRyRUiaeT}g~?kBan$fkhzmj0WjUzj zR$Yfenr8LxF0200nPC`xw%}8HE?gW)*2j}2C9+i7FlV-WB@I8N;&)Y+rGa@EKu^8Y zX_Y0`>*y^H!h-RACHv^^s%aA520UM>Y>>#PY*s9D+qPpm)`e?CvmVh)=Rr}E=siHA zQb~gh8?e!OO>@TUp<`TMudQOBBCkj0BAA74L086nx#CyXpuU=HxLc4@BeKWKEM`$P zTQkeDFPRa21tvB0_VOU6WvL*c7Xf;_+}p|1w_sFvDz1~3j!H|g%}S3%#?oYHP%<^; zbqi+7mzgf-YeAb^i|rC%X?+0e6}?5-TevUtj+e}w?-`BgEk-pmm%$vW>{CZ!u+UrX zG^b>%p|IEBm%6ea5~r7a@Pf}m2KcZ>r_8Xd)wLjyvCI8tz!y0$c9JR-NnHUI<$wh| z5PpzIsjM|5>D%FA1N%CJCZlhv9Th~$z9-ly0MkcY-`UeKvlu7t%52$tt&v_)8Vre& zes9q6z75s6*ncbBXRT*49k-D`yVnUAyN zgz*ge^|Pc5T;~1Olg2^+q!G#WGX%KzbX|W+Dnu9KC-#2d@h}4c!1g6 z@3K2koS2sJyh3W)rLw`RdLXU};lWbM+FU<8$c#G6leyWG*RN0gm-sW2)#hy1v1Ghxb9R@QXg;eSDdAir|?ER6vois&$WBw;B9UNt%sQ- zXE!2iGpn6dwCUsF@PS-myr#u$Px(#!MQt)}e0tGlS99(Yg^CH#qiBg9zzccHbnEeSVM?HS4aoDWYj2fpzpid8r$ zD$~Z3VdIu!qcWf%6HpOf*cR7%-93P%rAtz zr?8JXd&37%b$d27!@_%_&RmOkhhAF_e}6`rNsr3WVZlh z0T05nj5*DM#wFK}A8uH>Z#w)R(07_Ux8sc#gq+1r^l_?_s4hjXW1d z5X6!No1g(6}c zLA##e1!l`vANykvISQS<5Y7$}XQ{=yb7kY!tu<}!hZmWV3D^CpX!PdfNPIDfG1^cV zjKkz)EEv5%yu`S^KIw=amI;#lQjpR2F7yUu$?Yyy{(qTKeID3^iBCe{o z2g`GF?fu#)c#VO5gAqxCH{@TYnM8$$;uU-2fgJm+X`(AL{w>j<&MI*rwsG#O*EZVF?EcbF&N z_d-E+#Dh`lwDV5bIb7^mGl9vQXeQL-P8VpWz02JA(iMl=3)Q;x@oxA)`_+=o)65IZ z>Znhiv1#(mO}oN-%!IGqU}Abt?5IMTKHdu-R}&xjs$%=#eFi;6em}l-R*ZF-4hioE z0ci(W!7NKghYzfgQdKOA4k;f5Bptcr8uai2%fC#(O%NYu%9wJzUFFKFy5pw6x#YU?@@l2OtB;zstyLHNouVu zxg+DB;WK8+7pz=$yUbyI+W9Q(pn|vNAAUe3*XziBX_8$1IWyuLxicZXEp}9rCI9o_ zV~EpIvZ^w^k_|dwxISAzGfzTbzjA7<`L3#@^v^t2PL078^;H~VQ^oBVnohtVQlc5 zz+r$Xx1Ji-G}qZ#-!kAO8DOeV3*U!@;eM#({e>SG`O@`6MT@i_X0$>- zUzSAsk-q>7s_PfD`kgyRluQFmQRbqH1S`UI7CcvUWXR-ag~$*JDzJh_-fIi^sP*bxXX#;8aAq%?X;!h zvBf4XZ-y>(y$hGTS6>9v{T1LIsV!2LWh0|qX<=(_-xbZCuacq)%fn$wg0BQH3gwIO zo@|u8Dq;yylQ5(-2xKm9v%VzrFT$10f^T3{^QomGsVS*#DWMtn( zXJUHQ+2OD@jT{Uk!vk1pp`%00fbVOv;DYk)NC&Wr+pru8EFBkN&?Ng-V57s9i%D< zT{|pyWt}WZ4_I$5F6)7U0*4llhsE3Cy)2Cd$^M_Rft9SKgHmlx~UW-_IAKx(4vq4IVpDr zvm`dkvhQoEo<(gu)*Mm2MZZQ(S7Ug}ZJoGkg1 z;N#P>7O?$*nLry2?yDQ^WWB|9WIu=|$(umN*HzWgiOGqZjqV$t%7wLtx=yM{_UT|B zA?(UQ%$6)c-9^ko_Lo1yEcm`G6Vw}EN7K16(wsc~Y=Iw)7O^p^lPGnd>@H?tLjFF! 
zyF~@%^T6Z?JQF}P0U3~$u={2t3aD zsl(gNiZ5+!ZJiq+Xlw_JVQ8zIxSqX>#r!|EO9+bl6)4(2=>^QVn5{8K@Pjj zfG_T0BBMUXZ0%+`nSzr;#YwTije~5{#`hKSNki!sdSvbf6K~_t*j}?C8;fdJLQGd*I@1NIic<~Gcb zS({isRyPe!KW8gGm=9ksKd-CzA^(e2Ks0z)7)0Dp(hN~JY}pZW_3maqPL@B8FOqUu zia8llE&v7n!9rmbb3*QJB;TE+dIC43feZFHn8V@Y7e+#WQm;&Opaqx-I1LKws8 z%4JpJTfn2Xv*HOqU~WK|Gp_G}n}R-ZJE9(m^B|&?lTw@0Y5{Vw^Ggf$uwYbQJEI~& zx~NEQ8(egS@h*;DyF*7uE;JK$Ibr-)l}SoN7CX`F(jfNcuTG=EJus-vYn^~xn~MKQ z;i`^VayGBy+3F$AXf6tpx(Mnq>9&k&0Hg__xhQAX?A*WH<`T1x5EhLC7m~3d@w&Kx33BW&l-X&)?D(2X zR8>1XSQSBI0~o{0Q)@}dT>=@lR zQ9UyEfr-MB3%$tOf;)~WPqJoGyBYQy*jHFmD1Il!g=kEb_!Z#S2_E13m~UlLGGQ0H zX-wc^b9Hw)dwhAa)~_WGquG1HESfZ{nT^vO;u=8rGLyap3b)A|gy!xIa|qyP!+#qq zcDk3Rn%mwuwFv4>X`{)rQ8ktyZ)+XhDGJ{+)j_+r_ zd`+^U5pI<>`_Z^1P@BPIzX5v9tabN50w7x zKvpVBi^4s~Ecm(wRI0L&$5jKMzWxgg+c`yl7d(d;zO681fYP+xSwYDqJo)q&yN7 zH2(7$ZE4EfqYUL7Bhf9l_1cEj2z)evXbI+G6xu|V0F4Z}J;s>69zIo+$^!{|WRe6w z7GP~@WeaHNP*kliRmkIv>wA*9X_4`QD?~|tJjm$YTJsojz_i+rw7H+KMz+bs*GQV~ z8dofclKn)mF&46V$rragl*Xr&yx~da=dN<%_*X2GRzulgRT-Nn(d3h15;c6We=LIw zi1SM)Aw0z_`NquhLdjINC#I$wCS_W9DlJG;D!Un0x*Y(M?OLt=| ziZ-7QoAIMS&mxzIC8MshjnE7)Fi*a*uaTbO6Ov6zRF?)`2m>fE%lQNgtxi9@$l$($ z+C=nz*ilMRfF!vu1{bw%u8fTZwG8LTGE3nlhW1@|PDkOn?duY7i^mZJE!t{ zNwEm8GrX@WTPS#k^@x2vuxR{9MD{dUGolq*&9ughtd<|%U{qg&D;L)tRwMI`V4}Dy z5Bz6KuGTOOuho?sz)XWTnUfjUV=rs#%UAy8DnOcjGtAZmX%jK4=6GsKMP5C;#r$62 z@@w*A=XnmXX!WhIik!{t)nhr@TWrWq?^(}vDZI`6`C>wurrycya3q4Z-VR%c?WTXZ ztrz5*lwtdo@D4NKd%Yqx28D*y$a^PvhYF8p6UHLm(fP*~SxPm%$qW`Dr!oGa&zFUL)J><5y^(BKDQ5aTadCFGqNKLoRp;Lt!QYvlDNWxDt{T%g6qEWt^cebH&{mpS^+#3 zEaHG4WDL41owF|(+V?t{hO+G_QzZ3^pd#Ro7AwX<(t@}=EaiDiNz#{DPEwwH$sDy@ z&t*pR_0!KSrA>&FNzY$~XVeFH$uTp!B_AH{l}yXkAHrA6oUcAeLVB}CiIVkP-MbfuH(oR!h;8r(Q_>SQw#LD>EMV_xLJ{=oc>F25VtNaGj-vtr-V1|?Bm?)L#lS;VNd6dIbbAjk3b3_mcuuPmZcEDuVF5GG3l zKcoRXjogq(+>}_?4*Ag<4Nyr9@F2A|Gg%t=F$~~=7p6@tElUL(eqv06Z_l2;`5=fEOSUpkkmYnmol{=$&HHdC&5S`Z@Xm%PCA zd8g$w7h+rXS4Q)79V*I*f*`5C26eq$pRc6f78hLB%;@kN^RyrWXS27;{OOd zVr1p+m<)r)B@nc<{gZj{bt9;z9(cf>UO}2h{tP2%-KUqVKDkkXS+)91z1wOv2XnF| zP(A#`Y&auUv2IistLW0hU*X|s@sNvNPBvsvqnUD0HetS7EiOYJ{$^%;!}>WrR$bQEx2Tgv}3 zysync6}=rim~MnjlHh*>jPKIgv^pFo`d1B}ND!x`tHBaVN~T!J;#6IkHiV5;WD~#8 zT$Ymi>gLB+5>QKh@SsANCawV!;|KWAY@J<_#3Dp%!!^y+|6qWxfTGQ7!6wSh@-xO_ zup4cCi37~oHrLZLwaWLWpiL(S!U;;S+`Lv;mX|FYWO!c*R_rm1{Vy=DDhr%9e6O8O+13FFF-&MjSD{H6$v2CvJzVl`9lhm~+W^K{uvR+A0INxnYFN63}3H?+Ge)41eO zw3^wFDKg;04a|gZ$drQmx$t0=I_=yLb`aqx)ko59lA37{Ze%8WcY-o%f(NONOPWOA z7-)3R7}J*j#%Z`WG3f2(&yJT;0^$lRD-m@#P=|?Dx&0%{{IMte`6K^%esjT{zl&HC)f( zl6Mo8N3S=7SA=NTn}|Dz#lA|M&U15f=IgFibgJt^s^s4S{Cw0e^^)Qo=0Uin!F>&( zDtJ+ny+{Xdh1(k3R~S%6 z;XGgmHEI%l4ABy7!4%gNXyG>Ap=Rg_*Q=O|*;L4px(?LCM6gs+Y0xYjYfN9cKoPxM z9;C8Q6^T6#STs}e*`*gP_oUbl>kaA~+a9q1V^kyXcmOec7++zThK^~0G7tTPwbGWF zx!!(BjkFU%E6(=Qp6f{lJ9(|oaI+LgGHDPpq}&da@=c6$Bg4r?x>PqYsu6e!fI0>% zW2Q1vUPp0<1@BK?E3I%7IhjAHk#>8~P+;Su9W`4c?!GsWA;`9_>V5}va=z;hj~UPt zbecn50@36hVG`}UT$m-lxyrt&h2Fk%WWs!JU$~Qbn|682N$LlcJ05|ct2@ILn!LFp z2QN@9E{;86MtsFLRm!DUCQ0xHfDgm%=*{kFE!EP0<{FFb&Pq7VxXunp6p0sKAxP>Z zs0g9hFncbMVA)!iQ_HiM;Uz`PMl*7b>q(fdTCtWo4`qH7_ggevO?hiUV;%jMA|hfAtT06p!7C$#vBtFY;+ZW0y_XPXaSm62rBMYSZ! 
z=YWjp(W>h=YY~YBh2!E8)2^G5S5znYUkoh~oo`y52U8j|vh1Wu{Ye{O;e0dVD=f3m zUu|TEIZ@t^y8?~s!s0R6Y#>RH;RY>tGYh_N-qh5A9aU-5$pvt7xHyqk(5Q`lw9$2U z^WvLdlIf|L>`)@nxmEM!y@NdM)L{o*!-DzzrZx94AHFKogof`)PL})z_=gHV)#ICV zluq*T29lIHV`yL6K)LqLixD^r;9&wRM_AAS2~8vV`eqc-YvloBHc%wC1uW8za}nRP zh{2pOH^|?|mzX0X$Wf2Pc@U2lMJv6~vfA3yM1HAI(gwUc31yOkuvztoULPrlVHusHjT%9?+3MpA{EXjfXTK(H(I3qhqFg z;p9wSU1f(J8qF@ktbQDYc6uKlbm*ER=W8>v-LHCN_P|6*DFw)IQf1soL-y|N8{OBe zq?+Ch9!P94%h#kdja&pHhl!DVGy~hXN#U>!Ck;U=s`psi)tLPMgETO}*RUM$;WiIKf##McK@8Ff)D zNbWvzWk&t%Qm-c~rAp6zNd|dpI{j+Daebd^CZq7|Ft1AZ3gFk0i_Ru^V-bLhjd_ae zrdLhy79CRV2?_!tqjE!w>9jG?>0ai+R~q;eMJs4@dT%(@mtb!8>?EoJUw zrhKi*imky)k|f_3WRwYoa$cLh9ogh5ofYn927Ik2Q`48hj!=h`wdv&kbRwN9ghFfQ zXH(f*q1))x8?ww$cz`+al}=PsBRr^-rjZB22->pcN`DcJzwjV4;CmKSQ3E`fPT4dn zN$`UKK2E^SUfIgP=D0k>OwG&r@a^Z4oFyv}>%QpF^5e{aFVB!tYJdl6cqOMx3y+6| z!^A>yg{Qf=rqmM*?yHDLPX~^Iq&^W;l)&=NSA?t@L7u-%WqXq0x68jB|Al92{Dwx0 z)5?=!1tTrK0;r`qb~3e9CeSX$J-MD@F1s$5m%JPrhVv?(imO2RUG6U+0ca{b%`Ezc zBuz!{qaCF_h-#Ajbdd2Vb92vk=hw8BEvCX!c!oLfrD^2U)I;ugSe;&;2`?y=Sq8)* zT&x__m(b(xU0cGl%!jW~rjnZA!H_HsJR1fOip@9jvw&xPAX|Gah3A+D-zPm{@UpB+ z6VHVS6lzQi*qrvKm)A!F|KWLNz*nSJ4b7KDNq;`*Xx62&ApK|O7TNLqwB4J-3yke+ zN>mgnB}tNB2r`0%Ey?if)O6c28D5%PXvmc4C0V$uzq}+nAv9aF;YDWRZ29-&3zni{ zd?Cj{XzRtWg%MaA2bRddLa&K2(U+JHXZczf6CD;L^QB-OAH{fIC6S?3Z^@1wT`Y!S*3Ogj#y=!eR%3RQ zA04RT(BUiLP{X;oMQUZ$ruHH-S;MQ$&4kNMAq8+_%p;Vc)g(vgs{utw1pl1>3Rwwq z2f}L%>5CUuB9j<_uLTfI4oj?!t1C)NYc{lXN7V2-<2uWyw4`EuA&ToO=5%S{^{{~0 zp{zX=W@XkM{vSh}gYX6;`>r^hi=_1-G2-3`+)0A-Rqz@;lJ%sloAD;|)De;Kui+)H z-pgoS!JBafC{isJZ9lBEB*OR>bLcD1?QDHuc4R+5lHjTe-4pRVql#uVRXU|;b? z4dwD6CrIeq0X;^bHZ7rQx&!`L*F5#O-gtP2+41FcoBG&E4`mdMz7s}|5~G!+4Oaxg zRtOU5d6(Jqh1X1Tba_@J(AB%)3SFu+2qC%i(2|khIC2u+V@`a}tvnD>79{w+03)Rd z`>t)6n$`t&WDC3Z8SfPNYqr|U%*b%=RCFepH>NSdvCVWc~{NLA6aq(#~XK^y9z z&Pm$u(&R$B(+nRnrtkCPi(mK(v`LwX68>Spt3Q36aP*Nivh5pJxE&*)K|glo$fBTH;e?tnYg9Vl+iRh1_xk8LbdWfuB|JypB)fI!a-@&4?@NYNz2d zX43b_7`fA%#{&uDWs(Gc7GUIXPt6AGuFyoQt5!JJJ0n%q=gfh#xtZj&{8AeQ&B6fy z^z(W6(LPXlY@Q{>7tD#Tk&_L1sg3M#GLHF|N{uhVK;a!mOi= zXzJ@QRrw0YOcL4CZVy4u{s-`SOW7~X6YVs;`o~KQZzPmOJsf< zPH!$iYjWW8WlEKF@dnubUE*D-CG#C~>3fVOtIyAlR)z2`eiza(j!a>aX~#5g^+8KJ1++_IwXH;6@IXtlId2+1>L$e%>WA{r?dDQEHTfd|e$8 z{gL^*ECPqBN%D_DM$;x2La_BYWrpTY4DE~5urXCD2` ztoUjzYN`(&tOB5^pTiVt(VX{uRHeCZvu^e93-ja4rOL^bZ%u_f-TV@6P^y<-jagZT zV5T8?$$R$cOY$qT_v9U6V8mDK6x*Kng_B|~HPQnT#qh2|jq#*CaPw#S#A z*?_$|$qqSwPi30;Ell8@Wn)&G#8@0Ib_ITC27I3jWfaeYq9)P52O6~r{#l%FFp#Nn zGL~19-^5?kG^Tgj%2I2KbUF8jKbRd~1kF9sp~#`dKf)qv+)i(P9uw2I%6~OWxYuvY zOA|i)$qe~M(rE=Yau2my9YJ(VW^ui z;xuzPVQ67l$U;f0W=b$y7Ksg)Hy^%b8B9uz*b&lO3c~bo1$aQCa#SW#>Opf2;wzdT zU(3y(gj594-<9A`+aH=s7YEreLXL3=xj$DnKfc`A5uc^hG(Sr=;o8n(lhDRk*4h99!e$;rn48Da!jOt5#%W3IV=8g+v^`cE5hr$OU z{8Fo}-kQm8dle2dXTAvk+8Q)@6pdb+H>?y(m(sjFw^EO3GS@L%zM6ehJUuT;1J{Lt zL*!0z)duNL)MKwVT+g_^Xg4JE2@PuzcYWZ>rDB$85N=?|OI0dniUi*f;G^+oRt~Jk zh0f|}P(R$rZ1`HXMbY?{Iyf|UW8M|Ku2`Bkw$XdiSJ{q3aQDdW7B-&?5wDqr};|PD{Nd$xD?7(VdqV+L30! 
z*Y~5KTHrx$ai@Yh?HmO=*A_cgquCGG#Pn$M;Jf>FvYN0X`$05Gz8T2c>nvvINmsEe zU%&iv5N>XGUl z)5kjaz*ES&D6#vqv)5_QuGG@X>)}{);`>B1Ikl1aWI2M?Wk~0SZ_9bjam~@ z7j{&N5|MvA_}3PG%mqO7VR}K{lVa5`7}jcn;;x*{tv-8Jr!z zlgxv!Sw%T@z=QKsud=93E4PD{lf{aEAW;JSGU-NU0ckN3PBxSOf5tJ3cwWaTxQ^>r z+)}i+>N9Og6*|?7`d%mAz{~!VZk^P{mq~8&A zVt(85Qe&pmN=I^DBizY6_+F@GQPYj?)x)aVbaH1nLH$?V{MCiQCd`Jf z`W@5y%`XkM0eFm}7Up%~8p3Ht^|gIR%n^eYFLn~Nn$TA3lt`c%+c@dOoc7GJ6v|Mgh+-^qrkIWVSOCVuNT%jl3=3RrSi}rB{9zBVGo*auFo-Oh7f2s+iU& z8&S{pmL;ictATwL3!A)pk9m-GxTYxK+W<#r{o=UCiiDDM9qih2S3c!tT?>KjX24ev zsg}}LQKf?&aDd)mx%!oWCbk*cX>i}wn~WN;qg1CyQL@hh``Q(3nS3ErS9clOSLzt7 z60MpfPl1dIeN$twf~4Hu{GcYWt9eNp7@Q`tWyu_p1@^+US@1=cWfg!GECC8Rb+NiW z?d*mfy!a{aeUE(JUD;PjhXT(w555jyMLK<6kIZwxMAX45DEv^#{jnPTQdp0 zW;-%LGSB06vZ`iVXL8qMIDd^$YABNjIYC12D!R)$a?)QpRjMBB{dhO|-MzyFAf9^_ zxd*9_7Vd5yd{qwncIqK&ACrDZd|f2Zy`nI*H-8TU+)jQqz9PX%@l8tV_ywgKkUmg! zV(bG+>LMHaK!je8U9MNw=~)GN{SDRIP`|n;Wije-p&@+tDm%5n$$m}P9>DNo$Of=W z%S&yU1s|7u>lktM(%jBl*FYlGT!CkQjG}}t0(yCY&d-LfagKN0ym^@(fE4ZYlF!dl zvV%6;*!dcDin1Mw-V>s@4;W&5f;VK{fz2sG1!@f%e3E; zbbpXg0!tK8Mn=K|4C5<-<7ov({1pKY1mKzim~4a@DPXY`)*w8{FutpiowRXs{)y%F z2zxNErr(zh-j>2c4CA|+cv=Aye?`DU0YKwIvQ~SiZ`m3iW&~g30_V-kG{^jvBrP%? z4h90F=?c$UZqM$SmY8pNgu#4`=h1h~Gzonqpr|NyLznO=J$Dy0Y!wp`Q!Qzt4AD#(MFuw0qN#_+VIsJ}|CxW5*L(HxVPcn+{%gVCn z@~@=*mZT?xgc7{j2~RPEuLN%vU-zb!v`>X73LohhV&>%2OxX9i#?y*7NR{aN=>VYc z$tSafXBfuU_a07JNzXs8O5`(v#AEMv<70o8(R`0RomaT3^gA-1O-AY=h3Bl1!JbwA zk*D90@mw&_8rav6Nci&%;A;)wq{0>87nFWJq^~8?Rzud1DDnbhY?Qx^E)wjl)$3J} zB=Cg*YN_4Q<)gxj4CQ+!jd)(+iqh}Mcrh5bUl;aU^Wh~1@ZB$*R8Yq+DE(5}Uy$W$ z<$vL2rtY(!&MN@Y@5p#L82G+V-x^+F^1knTzW9BgzLlb{geV&NgXL~xx!IO(?%wb! zQ$KCh$ILz$&n>v;FGY~BR|BT859=SHrqK=XUSrJF$9kQw7Ile!EzoF*E-mc|uQQhX zu*z%9>yvW|mnO&5eLZwhe8v8H$}W0?0er<*{^SBF{|6%92qbE1>zKXCaK4TiPb{GL zOJd#(jF$Xu+4!v!-eM4E$*-ps5Ci=c0dFN>raiz`KyOHAuG zdR9SRe?#^6LLEh|Y`vzfp7)u)udGVv6$t5fWV{~?)E_L@WUU(MQ+&Wklla^C;7%+} z3#5V~c^?E%`$&T}zS<8N$oEJV^|S&f{)&JP1As@O$*AEYrtfRh@>`#*|yX!5k)Gaq5d}fX8 z()k5;>2G9z7Hkw&a`VFHj3+-hz7sj8$Y>9!`+4Z%exfIY1Ut#WY9o;Q1!MT`XX*R` zuk<&vzX&#JDjA;X&h|npe954`nu;eDX#6EHUj_y--W30lNmSu02J)3O*{KCo_G`kv z3K&Y7x!8C3nt^;JO+2jtiN7M?>jZR=u-FRUSR){wRsh6b5%5g{;>z&hTWbWQ1?BUl zO8gZ8-v$6(-pxT$IM3;YJ>fgXaCUidrc6C6w!2J^r0;@+I1FCELF51N_SOM%-PZSb znVAD^UFJ!Xv}u}>owc2~jpI$$iPID&voo{16VJ|UG_$rhWoBk(=6=e|%*@Qp48L>gww10RQLx?(Ta=m=`1_fZ(++Kb!s{XmNj^+-!Bx z2efN$i~E7G<~8rh-XEZe=!Ycoo*6WUUJKEsKiV)&D3~+~HZG!x=EtO=S-|MsW@E+u z#8~pof>;fiwr4%e;8j5=e_PwEI|*qMDRxv@EcrN zL;kto{Qr}Y9zN}ix~sz zGvjekCaE=C3-{v0_0IIZTSfwJxLYn^=$V#sGRUugD*U3ZxcqWe;*e&PuIvP9?Pd$x06XJaL18*!I2mb{u8UA|B7#CCbI zRoBAM@bP@msisX;U3(Ed0$RqEjDC6$nL*^uR)sGqSEdwx zQi_9fS25zePf7)oH>#VWxhiR7p3V0UxT_gU-uOUM4CNA%cuyQxCkKOzqWKXc^xQR! zDsM=g(jkQ)JrN$Jhg#_2b8arV!;LBLp<=n@q2fJp96=5adLh@YJJKlf2EB5M-Z)T% zTOzoo5x{xg9Jp(3l)x8D9)ZzjO*0LyO#-$sB+o>c+1kKuGK#zwrh*lcfTriT4r$Ve znWl^?cU@yVBoQ+PEb{A-oNoz(InGXT*Ehbrw-kyc&lhfq;07dMhkn{>d$sLuX!v;@ zx<42^f%iBHEAkFA9MWtAxvwY`OgK>ebiY|kxCv=!?|Oe+SSY)x5#=q@k9O+? 
zm>%b5_9B?j-O7maI@ee(d9-*>9JeM1-`cRf?9W+o z#~Df9TT{V=MK?ur8`9A3W2|jx_QxAfo;7176C%4SrrVM!4dFzF`~5w4g7IyKj>(Nb z)0*(0BE20+`KHjQxEImg-pKOaR4kV~TD&KYJBR~|;@sfs?zlmYST5m+_r!50a7#tK{)CmK`UOao;S0$dAshPYP+Zv0kHX&`)O zX%63d2WvBKt1(R<4z(CD`I}9FMZb;o91fX=@VHyK?Z%fk9EM`a!-ZQSIEe&DfWX?T z8PM~*Rmbv|JK0#LFFYAWo&{|L60?J1*o$a$uDi1_=k+2gm^`s=ismk)VE{$5iYpJ05lg~%k)vdnhkkzGW{v7#?ed5y z!qfCn&4pzj3wXLQ<+Z<2BO@Y-VU7$O3xkoM>=V0!wiRQ_`*21F1H%%_ zd1T?o*_=frJ-2We$L(=wMmpJQ@OY}Q2at@er2XyrdF+pe8^b-&sPg*x zqG<9&#ch#2h-4+t$7!wp!UDD!8Q;NrJ_4ZV9zr@chR`bnhrvUQEYGwkl}(C=#$G>L4F) zFg@C6@?Mt;ms`8;5m0Q8Aselx1=t#oH4>zK&FJ_m1}q>1`#8eVxL`U4_jtq2GcJm3 z4$*q$4M9JF(5#>Ew*_YJckYQskvHO(3nwq5{0C7!iInWgV?2_3va#j$j|uUX{)r|^WJS67oJ5<)?nx*+_Q};uLeW0CLc{t~~-nxGx|sN7GKN@f@ zxR;uSL+e))fyBIwVmJ=Q#^OU|UT)-@@t5^2>&b0|aH4tzso2?K^3!&&G>*K^mW7f= z&l`e%6`}oT*Tz}1d)=!IJnx%o_+klsxFv$ukRTYzILH*9f!7*CUe}75I?E*#@t!zd zM-F~Er#AMv*BgFb^^8uSfFk5KNPTa0d7kOD-)QK0>SMV?eY_`*|074qGR5go2oQaf z5gn{$>hVPOW|EZxN666HZj`CRO3E#lg>m29BV3 zkCEkdr(rG?OD9j3{6(zqCF|Ak*^G9l1Kx7)GyX%mCB=hF#QP~Cm_@#O_<(U{zQ(1I z-eKTxiQt1IV2hVkfgD&JW^r;KGOF$P^ZK2&0Ln(5?g><^A0{gs&%REp4Ksqv^zI`@ znCB!L?V3txV)`hV*ui4&3&Mk!-N%e2uZxQ1lJ*ksiR0tsplKOC?b;S(h4_Tgy~bZKp1eN2R5p2}(tYuLk$jA%FFFrpjQx_a zo{Yaw{;sU#^p(vBE8;Jcn1emejdou#sysW{&YF(%PFxH3tHk9?im5i-*9<%FC5=nb z5$}oP>*Vl$^TvFyfn40~8%C1%6(O6dFPOX@-4x9?Npn#=%AmQ_ZntAEJq)OsF3I8)Wt_F^1r_A68aUATsrO7SdST_8NQRej^lWmp?vIP8<0@%Uhgu(Fo z|7MhV9lQ!AkExrY`8#RYfEV40`-d^)HDHuUz;P|ye+n1LuHC;jh-(S5KT$d3O-8Sb+S1^w0U#@JiQrU#5bYFZ|Bp*AsA(EC}$$0WQHVdn8@l2ny=y|4$3?*1mUzOC1D6yvLu4aUJ{yi(1Jf7VZ z)78nuZqLsFVLV;Kc=EcvST13S_r!4+Is8e0a1zn`GB7uMOaXhik?w+&$z6oWpWgk& zof=?af?3Hpf-=hQ6TSaQaGfr7uutenQ*hAlvji*ZYm%DXM5D9Nbk{O2{Fw5zYC9~H z)-P@d`r3qMQ-Q4=Yjcy~=QR~8nUL6BF*UQ>zX5|(&R9M>a9 z+NCcvdd;=^A~MRYU?-AM9+ED-j4cT_kc9Zz?>I61z-dQJZ{Tid5)RfTtRPFkjVOTq zCl2cCyII6iq5r(GvF7!kev=tjD=YJ+5~lcWLcY>^!VWZ}fxD>@<~5?p9h}D#+097i z7oEA?5&3UWqc=CIJOd#*r%^O%O2uuF-GXGC7;@-#Lsv7Ryoq5bmXL&7A~=c!n?PV4 zll5#NuAXaT^vt_k8ucFh^F!9dr7V(lG-dHi9W453zdOd*^S;zrE_w2JPaMaRgMntn zqqo{Cb?=kEmC;RKhp?j4yK~`+`qrfOt^}S`dYqBr$CR2?Z@P*?$%FBRpl?HHT1IZQ zhrqz&4Li>=@}&}Re@E!IB{~gpoetqnFoHZoTm=&j-4xC3NW=QUe#I4cd!xXQjQU}r z1fDkpeFs99a-zT$-)`Ut0YszT(RlJ!M<#a|1*mB6L|Qhs*e9R1br(X`kPN@wbDNEK z`qWA9e==_gRm59J?EB)7P@7SE?nGnUoK>Tf83`w<8B($7AuM~>oZD(7c})*dyl`%m zCrutl=-Y_S$(fFqkG8^gqsbf1(&7nU`X>>eL}Jz;MkMZC@Zr%X8*BDxrm_iT2~&JK z$VYp~4)k+(Hnz;6tl!_;K_b-DU21q@x{H{6ayuW;d&&lxtYq?3c2`V0$y8c|KRq4- zF>t3EVP-{6%nNUrBHcw&c2){|ce6&8*IB7x@^HE-n$t*g7-(!Ts5RFX_jh{5JpOd! zoyEU9_^~sEmykIMVZ*^h8Sx^0*bBRjIIrPY$>fRcu9(gs({<8Ej@cJGIP(b^^Xll^ zIxCoFo@olsIwX%io<_pwNJ`rI>=XIbEg4&$ogd33MDd`5hPR{Bw=&(AfDASJZp; zcwAlGhgHw6@#Q^UESEf9yeE!ja?tAX8PZ+P81k$xUn<3C>UN>`iOzvrwh@jKZ(&2# zz^xe3bdQ(%(kej0S^cBv&m(=wTHo2mxqMsO_}H8?{)4sF3sBJxNXt6)`}5q;sPgL6 zmr9<`-x2yM(fu-m_aOMC0oLd~qsg;mgS$Y*6Tb9MBHmA8b~`0YrJOXm-sHj&^R0wp30Jr!g8Ptwof}qzkkA}kyNn_4>yPCUig-^P_ag_rQGWImwkF-* zX!5*KMbU(*xGk~=kc>4pTy;Zq0uMBzyc!F|5|VIB1P>yCpZd%xZO?;^V>5`?ZwW9J zM$`5*1t+41kcfuUO1Cj&wm$bznLL z4T5ggkZ9(y#*#Osv63l|@Wk{uGO05P*2|#QK7a>vk2l6V3n)1@I)(b zc4_0xQ;jfhutQ#x_Jro2B8&KGB<7?gY^MudL>|GpkcBO&(RfeJs>Z1yR|YHU|06X=K<2FuwxN5IG3MD76Ejx_ zJW;)wRMTCh+DQBsQ}F-9RT^m|?5z~Wn3k&B2q{9j;5%?{Gddlb`8q zd5zN?v!!Ge@t%Z#g&e-??ju-h=)P(cnO(OpmQ+hPe^&%w6G1pZ&3%1?1ff_$5N?U! 
z8zMj|DsHU)<^~Buv4kMp62Z4fz^2LVU9OF@TimyeBhTEhk_nC771MXbG(SyrtO+n;X<}A6t~;Oz{9gp8TT>-3OR$6XM&g1MPDhFHmzMtEYn5Si$i?60(4 zgXvl?Y(#msxe6vcx+$8AkS1L#@X^W_HL~eVFY`SSQ1O0(Tv{?O5E7tIw&b1l@HwI=l!HFfz2 z=We(w8uuCa$A^4aA%`SgiIP$;KzsQ>5q^1PlQO+E*%M9g$I94}a1~16tdN0KS2e!8 zSs}_KEf&|py&7>#TU+LkoUi4sZUlMV*yIVh#}nB#NXAUVRZkvbx_Tk<*&SxoXH9*r z`ZB9f<-xo9w}M8B4yPi1q}#Acqw~X#FbUJUJ4tAxEW(B`ZB4_=OPJz2l6;p1U-W*& zy^_~7#uKw1dUAaSK+#=`bn5?j;Bbtta@RJ(=`B`|$Ud%AK50#rzlnPjx!JRaW%cVA zV_whhOC>!1j?k}5^a<-UKCd7`xO{x@^^AAAlR5L-v~`*?i9}tWqG)l<`1p8t1LMxK zI6?q@^3VZE1UDoBo9o;er*1i8>yBM8wcL%2C9m=Lh`d%M5oHNbWH%;R$y0~r9Cs7r z${W8XH&mdB=%yqpE%hRhlQ|{^?q)++&w`!QQeAEoQ^Lw27Z72t&JvcIv&d<&l2y6<2Z7#r?n5eh7Q`@ z#+arzDJwO7s0O46kJm%(ZLi@d_?Ek^QRO{UESEe~yeEzm$ie1?t;c+t+Zjb(2NQ}V z4B?gtZchSwUshM$9Sl6r`?9*40Iyn%?;QzxMDi-E8)ZG$-O94Nld&DzB(oyW6KKx+h(kH#=n@{9a-7wt>1t}zMbS8^Zg+c?j)nj8}ogss4Km(?r#tO5mb@M# zmP?){-V?{EI~f62FWJBJ&z}{-6Xpz+v5NW^X-LN2%@bc<^vfP-5JJu7yO&aADcQ9 zp_FGZx|bk}|4j08G>EY@dyFt|G#C?K8E%Q-EE1GVf}LGN3Eb6KHfKH7e@E!&5S@J&rn=lI=G&H0ZVcU^~Wlh z5b369TB1R~4f1lrDs69&MguFLKh`Z9cHZ2E zg%a=xB!V6ZI7)=Yb685QZxnf>gq2Kq?5>zr$W$8PXRtbOo>Aq!i^<&@&_v`&#E-q{ z){vWLU>tcLyDybIioYZDAprrky(rH7DS1Qp8~GvW zMH6s|I6x6`xL`l$){HcJxL~=Yaxq-+o;c1I2M1(!r1#n&M=Y0c#CzhnH#s<;_va|z z#|ZM~^J4?91btsZvp>UQF)`Hr3_Pzt3&j%g;g$&QPXf-Q!e&4u0FG-@4=|=Y=VGON zLRtBnxF1OF(ign7yXYQdvu^5KMQR*Yz0^xxW^em zUc=iT3nm=8DVoQVhAm;S+n;w&Fp9jEz(NT;ZwUH{gl6Z7{G=R%-IEMGuk(!M5`uV7 z98V?(d!GX>$G-C^Mv>S1gklLpxFv$8lE4qGi=E-Tdz#_ry<~(z57Qd*cZB|QqO*$P z(CVIH_<2=iLvksuG2Ro$GsS^P)p7^nPS4sPM=Y0AQoJXQXOkoK1}&f?3c<_IF}^$F zNe}viG+>cGm*o6XxNExG!SL`rW6b+fR4{o&-4xC9N#k1rgXY}}j3cupjKtnI%fa_A zBsyC{T;_K#GW@)jpzwh5v_9U`6TFxl97$u$Rt(S?xR)4L-bh+0pHy4rZ{mI_xjDf3 z{4(`mE4!B&@k#j8{=2VP|yc{2$snb6o>F}+$$Y~Eo~_nHkdS;>US?uzNPWGZ{MFu@%=~k^zhSO(bW_ zYt3`H$i3P4@>*UfmONXyC4#q*fEBldoxSd@2A@}PER=xrhM?aDuvIU{F zKJFdl@J_nfwVmzZQopqo2h6|In5TDA&0929I!D2xU2QU2G{!G9Yb7l4zYCClL2Y(z zrV9_k!GH$T6+u+T>!cj-H`pCDwvWDvI)%tGnsAG?0%#b+)6YI{T;*<Rfc@O;8V_uYkMeP2z>js` zN*4tSiULL)pCAV_HqEYqpAC!|*u%^Dq%q}GTKaOTVB-4}`RJ&@XNCAZOd7(MI&hyh zw&?~@ctw>~T}2b!XGj;`(Mj!ItijJ3Q_efWWWK0C0};XJ3WAyKt398x(tX|taz4;; zU5;Ui=L_VC-_I(7DC47i(U@|&oFPjV53{=f63v%L6PnyiyW5Q{Up5x}ShvYFQLrE> zV8rp2f@3%OrpWQt4RP@G7aRqQIKD;>nyQ`Nd8-|_=Dxlm_{C=2_$Q7CBlvF+KJ+uE z`m;OLh`(txIiJi(BTm3X_N{_!Ci+RXcN>^&f7{4%z8I{-tu1vHnLOg|zM?QT4IV`Z=ki|)rpf*_>qjqQK!?+i;kzaS4M%(H8e zIw%?yd4pDqte}Tk$}3?>J>Y z<^FC=__6MbNzJMPMjZbjM`&W(yZwgR@js0u=WAJD$x^iyK%)55@gGuf%&srE{`rU){jcFqU$G6HSV+UHhlm)$z5ovDBbP*sLmhQWyAV&n+@3ijV|2I?+u1k-2o;jM&11w}*={w4GbgZ5$*aco0B z!M&tWO!q|uib{QzkwkMTJ;uHc&ZqYrmo|=^4`jq3G$2u2h7@67+}Y-F?({ue)=2)> zfzhzUb2;+(NE7x92m*1JHq1WI?#2Dea9hTkGN8B zT`IWxSOuZhuV?(3%Nu%g6W+T=75nwcepr>=w>)eM==6|&;0DHj(9d7Sm4X{m0jEjW zCh2X%-3>>pu;%xq+{noDnw&uVv+JUaDB2s7_8Qd(_ut>@u*p_;6Qe#E|7@~NkkQ&p zu;RZd`H!gbS3dchnS{F>tb`OA zcdQ0{G0AA+Zf#noZ;;8=QYDb43%uMD5`#3| zo|-tYE)P0*m|EC1j9nFe)1teB>B#F{BZ_Y@V-dxBNAlLd%iw{aJSLE?;ZA&%ZQV9H zn&DQ|$jRX#1$QSCbPp6>zt2swOK*%zQa4lTVdZ8$JipUh=-1HZ`Z$&h37xi>s)HH< ztGH5dA{B6j2ishCV|jVUE+%oVaShJR81;0+S1naLWk;dJwv}wxLLZVVac zU4uKmxwnqY9i6`0X7rigal{N=%K8KnwVk4TJH;`Q^Zf(0o$v*1w@ zL%sdZbD+&8a8^u^4npwd2tx!lsseb#Cd0O(&EFe$VIHL)ne<% z8^_aPL(W}{cYD_33dcXLE-IiRKZWFU=r8y)W@_7a&vLIR_H^wu*6IE=HMI!fNvmLD zJC$r4w?b$vrZNMhQ?2cc|HY2cf!k%gdGneS%G0N}%IFd@OCclX9>#-5^{^#oiDLLwh1{r^u`<~z&01#WaUD~TXV%8zp`8V&6j8kAl9#sAOzBX# z+Ubq`$u$kPg%!8gRO~^C^}lVX$MN=MGe1Ec&mv`ADq{~9_FVe^?}r6LKgcedn7k?} zZQUG?DB>Q8>Bq2Kq8QwmrPSN6^|n7QyKn5LWK~6h<=;thIRY&iE0l35lVafG< z9&F@6KyJ(sv@~!7(|-Rf?O`K!hyxD$yu;uzhI`Dd_2lR&hkQz|tF(}|u3Kw%``}tN zskhFOih{moOBll6M|}UWV|vo64fy|z)~I3bh}EwB#+29L`SO7*{Hy?q_5f+U%OxCG 
zR>RlG7+su5w6D`@`-K*%&+x)P?Nit z3CR4c0piIgA&UCmq&^JPRUaw_^JTw!=I&$Mc?PqPJijh>M`y3WO2B<7fP zHx9V_8Fk(}7sl9n2cSsrPtxmv6mtB#J{2h#cjG|x0gQDXbzSSmzqkjOgjxLi^?P?A zdI`Fi2roeoq#$a#6qs7fysE3D{h`e?fp}dC4 zuu|}FDxhU2p8`(gH~?D<85ka6#GCQ=^_zeYJhLtgPK1vnAqUiwXRq`V^9OjO9%a;d zHWn$%mPHA$^wVQY$D^s^CeSf)YPY&V7cz3!;jCXwX$I~wrt0jh3dx;AR*59>v6Of% zP`Pf1UBP&OZkln8{AKqZXSzHcs}Fg}ou^)I5=DZa_8gxvV0EUaP&YhTnC60YCJ@P(Rt}BCJ(=RT$dk;MLccJ$mWV3;r<4Ci;HTrdoUn46bA2cO-2!sEcKsdq3{!K@tant!_3x>A za{`$}K9eGE4!UZ7QXN|4y$kSc99XN>(Jk+5bXVK%S*Em^rL+p}l~!*~W0c%yQ!ZDN z)2S@S3#_|5$7JOVmjdx}bP*=U0a3Ki6>ZovRId+K=l#$20GVc~@fJAjKGXA`opMiM^b~OfCI$lg2oE|V8%!y|o<6{Rn1*Z!iyHZWSX8fD=2Wd+x zGZs#yFD2sw|TAa>`Q`OdK3 zZTA+jOZF8eBG0v-Hoz22T(2b8^}uDj$Kzq>zC%#&RbS|$HM&=sfV_1i!{m;1yO>NM zQLm;b&X{Cg?Zn~_k`DR7m(eBeHKru5M;B0d52dA~GOFlbOM3R`<nV`4Dvp$=_J{3xB>%020n)v}#N@S-WVEe- ziSUgiJPd@If?>569cj0Qqh9cz&iU9!YqWWTypa6!>tX~}0^URc^iE)=z~%ogX3p-- zMmarv)%?&&hg?Du%Uj68d0M*chd~0f%e4dUtwxwP0!&^>6r`BnM&?aqZs22YVP#=P zKNwlE45!5smbV7SxVM{zycw$j`ii!z3GmYM4(efdd_FzXcr7r;x#m#|9l<+IMBW0A zNAUv2;)(TLWaVT@6EuCiG7Cna&2=WAWh)RxpuoyLX$CJcC!rUW_i*rIEn* zP@sQJu)}LR=(+MsZ&;T>dQe2Wp_BEu+kOO}|iu3=Hn`#{5563>7dDenAgQbFI0&0)M;Xe~^f}j}`MT8fD&>B^3Xp zx=0{J{3R08@z%p87Y<&ho{HF*^!s;THu}7UWMO%FU5o*ih_6V5KE|!lUSf<6#^5?^ zVE5GxiV&6;p^GuV67e;OU;)-d$wxTAZi)K*V$XejgCd0GMd)G-uta==BIqIti*4MZ zf_G7yiT}lXcX?3D=g$vKSl$<1 zAcZ&QbES*{mV_TsLNaEc8N)PRM&tdlaUblMVVI)*32Et!j)%eTz`j;L>@~W0Ef{qi zdQ_BhKQ#?`O9>IrKm0n_((*HE;g(*0C}ABwIA?xt+<9@>sb8XkCbnOY?UG{iQMA7_ z9{k8?d}DZsBZgm*;TAx$?2k1&U#m951qgZ?YIw%I2rp|lS3@S~Uz>{Q!Oe!6`9EwBtTT_!a2L^(F=Oaks`W?A04lZ`n4Ced2G2G^V zsP%{<^gj^&hQPr#U#!hyC2}|Z@ANhjHwSpSfA zHI+o#{zPrGkw#|!nELhU|SS z?w>}QSKAT8KkgVjasG>(TujsI3I59-0beP!Ch(O2#+ zY^tVr(Q*u?!2Z3Zw`5I@a!KSxD3Y^v7)?op7_Mhd+7WKW7zugR`F)3gt~!#v`9&8W zpW-fR`p*aA`V9}I*}hhqGu)-j|w=$%sKp z@x*uuGSW^T4O?93IE;IjG~T=k!pn|Z=O8C7v;gkb{Vo= zE57A0N!vXO*G)W|(t=?xYutJE9UBedd}G!H{hXuNl5#mcczJUi`!aWVBfmpddz`S* zj#whQ0?Fu!vhfD4Qw%p(G^*+CY5n2GUY?e6LKJvh1#Eym{TYz&yDw%7eO)CF5$8!O6o;Y?RpJxR6jQ{q%bm z=KHNRcXd;e7g@tIkU|`8sEHD2>8IgK&o!uLeS;fjVBH(tp=r3oOx^U}+)V2Y7~B;u zDLq^@XVrYNj51d8Am*JJxFbwk=J*$v9@BR?9$aFMq?qgAHOl8xcy^aDdq;#zAF(Ca z-@WLrX>#%whlJ?m=wc$g1YL`Qs>`#qPuRkIocgs*N1iWJ)tpvKtSh04eiP|AHVn?| z;%~>+W>y_nh8Eh``PhIb^*Y9$=fe?>XV*mqROHts`Sn3Q=XV&)h0PP(niGG<0X@V| zU(b}}txkpg3guBeheq12Pi-`*Rh>Iy4RX2X1}0^?9WR?m7-L@v07ZI3l3o|2Xg1jR z5Tiz_+Z;k~aU;`^H=Ap2Y%hf;k**t4R}H$XTNpdzrVUo=YzFQorg8d$gu!m0x2cjz z>rJVZEgBAe?+(For~j|RrUAC-o0+J*7A+kA@^n!F75U9c&Si--GOB%GWwnPW$6FY6 zo}ncaPpyj(Y!x9zTq7}OJLa>TTSFXecQ^Afc#kO4SGc20K%V7eXyJ2?2YA)SVN1y^ zsf05O#i)cW7tuJc?U_4ihkf0{I@0O7qfOKv6kFdy7qHi)i}hF}?-Np%aG+J$~tgIlO{Whi|uPOxPMd)G_R}zk=1Xcw$BqH{9rVsZs z;(>z`YoI07+ip-q0P_;2;Y!2_6hYfAOjP5pJ&UYw47jkuy#=>39lPMJFON%A&R9%mnMKVtk3@=~?j78A1>|6u+16U%xHqi|w8v0Ndo{<5vPW8{uY z@q-iBb~hGJWfwbR8s@Sz@YxA!6|o+JWNoD^Hj(I(ig_A_{ncj3)v(Q!OkYZol`_V) zRmBwdc5?gW_vJ7(=H^!{4EI_zb=*nDoYy$H$<-#(Fn9#fc zU5tg6kh3X-=JF(i4N=72$+oi7CnLhi%BCrA9&Er~lwDQq!YYmQ-JSZ_oy5^$rGdRs zwORlFOPy8=D+Kp2L3!VDg5*UdcgomOaZjq?Ql~5h{5P%)6^l(61-7>vJO{_sP14;_ zX#I~zu(_*E1(g(@Lxl`NY;+cyHUEG3N6QWPaIg)qVR|-a>G9=#)r^A^=R7&N-+=N% zAm6yqz>qMiX%f!Kl2Bl7FgVcDNn(o+J2Ah99F1u2N*^wBY{&QM5Ie@YIjic4;u*)niS%5O z(sP(vuC#^VPQ#W+x7T>{JROtG;R00bU9y+@g=i`EXz3t{bQ?QdmW@8o%$RI{mQcmt zBR|*J{c#3#Yx-?=1J^g!|0RH?f+n^VvfUPJW0FElT9FJh97NY6ZX5H*^Gw$us}Tf; z2K%{mDe`|-pl5M>7QQ)sPx^%5Av|{#%i`#;OlzK5B&4xlrol?UkOJshwdE=V{0_R< zdkZsT)u^Yp+4a|~3Q!U6BQfXsa60kxoW;3O!?judhLzeGu=n>L?0yrGH=;#6|M=@* zOUnUjVaK?^m$D<6!z?+Xd*nHgty>pN5{1~M3nYeAFlV`4iHg_ zC;)c6_cASqe!-~1C8hVKQVykZo6Gg?mqYGjg7V_|1rn+!I`PYhqP;I^N9WV12-nz; z?#)T>gb`M 
zWjgZSgmAn9T~t6t{%DfZ+piHyb1{^&y#tRi0eRuh!U`=R+OuPTCE~FZ!5KA9<%@~8 z=qYoLGv4VvSuxaM(T6S!pLWNyr-&kaJP9W*2I1sXAA;+iVElQbT*hLMu%+aQRKlS? zxZ!tuCqEYA{Fy>eGXBl@7wdNnBVPRkPLxk3a8x`iN;%()VoY zW561swdr7<-8FkG12u>4Ii@8qU@Zc81ywMyJ(p~G>*}3ev%88MYuKkXaL+R>|8LgS z%Vg5}d}?LfV&b1)YIpnY1tw=R{=WW)ECgRSy72CP!in&OB%~RbB*Ymu7VGF`+>4Ae zuXQFn^Z6Zyz{`Lp$`_N8t&<_ke&;xMW6kIyc%9dfetU)H%}Y!|-fEzM`scQ*DHPK6 zQtD!0Bg`pUKikc#2POnLk^7O&mQOi`Y@GQx%TFcn%#UQQ((%{2&W+*|JSeALt{ zOhVphE*!5x7Zp&Ezmnt}*QzEWqA_R~#tg!*G6{L_rHbaIRBt8}67*^b;+B5D!3En$ zkVFF8&*3o%<_#RAuQ5S+1FfNjhB6IXN?t1^2bcnGzIH|r>j`i@)>i$GdVi$>H-vkg zDai|yjA&lRRAlLRJ$2A24omirsEJq zR}{FC@D@toBq()VB6RA!Rm3^Ew;Fxkj8-td5_A#Vp~4jP+epoh0%k!tY^>cXSr|vV zw;Ok!_qSwTjKdS}JIG6mGZ|OVM|Kw0+&hgkZ#796{&{ub2dV%l(sz-R7XC8QPuIf# zF_Puo#)lsn1EVh)T37-kp7)UFa6pX=i5LeEFvz8k_Zny3JDof*8m8#qM|#$+d@6LK zFzF9gmfib}H?MAm5gxbXmx+KP{Qya6k(FIr-c9yFW4uFN?RrmJ;43-Zh$XTQk&GYA z`22V7!$z5BjE`9_R>%(q2zn93{1Gu1n`s$1fRv35Bu41Ej~a8H!=Qli4_sW9QAPeS zlKY@I_H!jSOUQ6?-hfGoK5hz5&#dkgD^`4ZvqT^SF}iiyFB*iBC#D zL#4iBO;X)6xC zwz|)osJt2WNVSZwtm&*${ROIK^NuGq?Cc|NLKpLEgc&mCymgoMn){+D%o~cwoB%Nn zR~o)V4VQuj43r9M{IXHvN5=4w?)NHy#PJn!@XLuSX>m;s5mIztf7RIXI_SyEaRE@Q zUn48Mot*g5_TlSEY`ySaCfG}7LSyARr+IdRcwScqbkngdFNE)2#f?2!!zA4BXKZNNk zKo?U8BZnPyP%dqIiEqUd~RdO@`As+gjv)Z2Wn1 zbfJxHjR~}b{DeZ--Hly}3^(0RO-5dKx9$>W_y^MPGiu-{Wox_`4iF9KI`c5Su=?%4jZlU-_8P7WZKww?S5@?GQW|DfwSWYBzWUSy|$YU1zTS*2Lu1 zO@ZaxYH>LNEg8R~jN>7Ld%oDe$DJX%tnBd)4%|9+%Ue{%?@eP5Fzho2CmeMY_HYks zD{I50ey`Sdi$@)El)mP95cz)<_L_O2V%sRWp5hODifeAaq zLqLJzbrzS!uM)iE{E2do&aPBCW#WE+_h%EBXQln`wOL}7?!Qp?4WYY~0ju;gzI@o2 zSOjN?(O*q$UK}LrFf6Ty;HnaK>1UNj>i$M`995L;1&dhlBA?%`^JTcdo0z=ewVFC6 zK@;ac$Vs z%%Q;gUtR$;u`#C%67?U7;%c_~UY-x3Su~3 z9VV)k^_r^ap?)fEfvJ=dd=U!ff{~d3ov>C>cNaBL)7>aV1D-Va3sR&PBPrjKUg?RA z`L(X!0NHKK^Hhh68+V=~MLz0hf+TQOBjBV+En4v-ls@+})*(_&EG!{V$efJzoh#1eYNJdy4g=$=qd)ce+ty zP#f3<%n&kZx|}pknJi#ayW`+-dDD~^{31wSAG#Pb8HxCkbOlP{!kzpXJl;rNBxIk1 z0}%O<_PQ&YoHMgtjo`v)<>Ijjx;vgknyy4m{u8Nep;~}8dVJiKO-JEEl`|*$$ko;*`Lv8hvJ0ZcssjT~?7L;%XG(BQDA*-*JK&YzwRs zT;2FLXT6NXgKkOR1t-#Lkn~0X#Qc5o-hRwXkbfR$9{1c~CgW~dGBAK_#m*N z@f)HMV|3&7Ow-*^Wc`t*3hJd*Z;oSu^DqoEkn%Ks1NMI)4@+s45%H#7E3pHj@s;n^qSO2W;@ zK6`y`?v&bK&~>*k_Je(PL5jFW;w$0V!>J%Oq}}f7RL`S~bu<3K`b|AT_(vXv6XPw( zNH?w>O4;c3vGd;@ZFG6*rY~%)Kbr`EB0P?SZ1Tl%e|zkd_?YbfHpZQ2t4uuK zuZk=Y$5RAnzq>mJYQxTfnLdI9dM*Cs*5kOI=m|!|tPR_P9lK~*h122gWHRz%8OqTx6)f>>Cf~T% zHuh74Rod$CXhyfiq#VN6Hl0(FPo!inV3y*Ry#?wwlFXQ}>25C#ZUXUdNf*20>AFM| z?N-$kO%9$qi2rgff+n*?OBRDu=lHhHbtmrP-BidqrPzZ6}Jg_e-hDTGFtOd?bcmVvO{zvt$RJ@5UDSVaYv=ysEC6X-&m2cJAd zkK7sOiE&zi#LfM^9az0*7GIElvXF*SMK z#R9{tDK3j&1}-UQQ3}0Ug?9GRg?@+QrZBeN)wJxwKVH9oEZ{0NGY(y1&ZZbn{j^Yy zk&OQN(s6osGaY$TKOu$2SqUn!Q~D&}?i4^XQGH9<{T3Z`_b>(1U7#aGbq((WKkBApx}hu;V;mL9~m9`P>jUZ1=da&NWS3ra@@#Mz1V`Lb6(v zbp&LUmLE$$!+_p4IeB{rC}RCpPLC}e3)C?#eyY{k2m9GAnuI)e@YK&IV2XN))U+?k zox0!W=Q_rE`>aMXwoms^B0HC4^uTIhj2~yM7-P|CFxs=<+lpP*Zm&t0zRgPCOBL5Q zi0aJ=WD?n>NOor^hs+NgceY}cfKj_OR4lfZxMhJwwaAV!jJRw{ZGQ<2-b7hqn;G;~Q>p_D5?N|6S~wHemv&NKSF<}og#PJj}Z zBiALt<+o2yWcaKtbq!}Y;{_!P|lhBX{35dEy!7)*yDW~B861S~O- zf7QtI7T5*qD_0lO7$j>SWpU6*9f0)ntq$CNlaV*;5{Q?fi?~O`BZ~F_Y5DCZn;Dk5 zZ(_|j^ImSU0%M!|2qm)fNp=Lt#>T`2TgTnYXfyqnA&rQC(kZyoaBpg$kJuboEB*bL zN!`b2x8RSHhr{(}QXWrq_az-)>$qdP-TjO-FJNWrA>g9CKPkP7BZ*Bv&=?{`{{xIM z&-zf?8PkwFphzDm(xe^XsP(~c;2vb8cbrMO^$qyQ$u2a}K$TW45xd$BgVcJf`e z-L*E;?ZZZSh%x4UF9Pw8sf#kAXdg;iE=UbldyUoFmNQ^a;XE5SB@l_k=zsSxqs|+s z1CS@4gemfell;aYw?+D~*<+BNAnqPdZaL^qn(RpIXKUu&=j~yA$Z7^U>6~354ry|8*1UzQxVnhKd! 
zD%n&5uv9#eD%h{dO!YbLVj$d;j6AQ488K8#U?O}n2|1^PEbKjkr`Ud&%lIkAoA=NI z4<(16;n@!XOT$yC;b>^ci;u;ZV6Eh7CT$VrCy!|Qw*L1x(h{q5Kb^WafftK{H7pLw zA3z^ZZ>3j92Q}=&_#C)rn9RJz8v=&javfwzc_yXMeC4o$m795-gpHHO(EB{gsPQAC zhLUlqj3laOlj;ayY4(K~3`Wpyjq#m0)dcyvTkbi=n%886%xfH?4ONda5 z^UzryQ|n?{tLK@7y!mW`%Ggrze5&AdpPza8V0i{^IE)?4_Qum7IGK8Z>B*hO zOgdYB47wD(kct>t!yTWNE~+-{BQT-U!@B*8Ohw-Mo#BKxGagm!FD5%<^*9xsoFD;+D zxZYZ-<3%@?2kvzyA4lFMecn>m#;+kVN%HQI%5& zt#(>n_kTu}XLdvYU&~c6vAv0Gl^H5?f4|wd4t|CTJkh;{bd33{uKX+63VI~ul|k?3 z-f9Y_&n9G1R1rf9uZAkbv@u2_VQ-@_hEMx+uraah+f4#~WW3wd8B{_N%R9(I2T0fi zAO63+yn^j5?=-HwmwVo7do`Yo;!<}V{Xf2(cN=q_O(Za1 z!MYfSEE(^ij7^X+Y1NOP08Vo_#=Y015$9A2rGo@Q3y15@SBQBqRC9$icasdEwxvr5&~p z9o@%`Z2Hi(zALT()x&*)#O3dA7fv{6yRDsYK1U5N7w0W~(i9xT_csn%GCn048t==e zUfOo5Pj66$z(PMeX3MRRCF3)caryFV=!94BS!15=CS3m&1V9mfj)eTOdc6?caq6iE z@4#Gaf4d9Qvh6-^#M9rsh8W@aN7h9JRODYEIX|uajeYIf9{gY3A6!CzR5V$>X!Lm> zwny^Z6EVg9C9*T1MsJDx-1z9>IG8UR^Ylm_J-ASkcb!0r_$wqn0>stefB)En!F~p2 zf7R&oCZ)pi^t#Zlf)cPqe2pSDK?EMy2L_yU>KVu!+8cEJO5dQ?jel@oHzj!kjsU$9 zT}(okoNrJL+o3m67P!A+C+!j+=LW;febZFr*{%gncp(HEg%$m`NPh(A$47p`Wq_>> z-!=((pTaufq#jfPzC!_H=E7V?UtK{2%Xf|Z5X^;zD)#S*ec0~y+CwyH90<=}HC7p7 zJHBu1d9`2Q_}VWn6Ijvzfb?`tz$rC#6A&`6x*wX9Jt(pM$cHJ|1~&kKMaq6eWt?Lc zaX;~BCBIdhoz$X3f&lkpQ6_;b-CFduUb5qEHv))hMYg^o+Kh=FUc#}Au z;BWp`ebMz-SKLocS)Om&puI4=T9-%~e@2a)pm7~rbKrh%%JMoE0eWG&m|)2cB2(AUD$;FUDgUQN%Z|u|O%^F!JCMyegBK-qN z{SZ>!niib-?vKWsXG@F;^eVQy1x#`OiQEhpie4)QWAyjeIz6~}huxn0vr*?66A{MO z_jpJ#|AowSWBMGRON}88OT}3Ob!@pEdSgb@cv#VYH5GaBb_VHn*wsV=Y5E&Aah;~( zj@pl|vL2jR?(e20?`2H#MU^l`{}0l0IOz`tjn!5M4()w4{H`x|4q!%)D;%n#k?o(R zBhPPI#`M)vzA=GB!u~~J{46lCx+c~j`2Xk&usV$Y|F!Lf`iBSHe~mWpMF_?d>mtPz^#u^85;lvDv%Xew7}5ptFG6yN=`&|_1vyq7 za3$bE6mTs(yE-tf(qL+X@v~hYbQZbLaAA{?=dBi;uLE6-$Ci|fs4lcNUXF+LnG-K+ zBJxZK0oj_qSEIXCc}*m!e!cTLwc{ixE(lHZlK;v!wtM$7RSt zcYS9Wsqi}8+Ky$$xb2MR!(h}H@<{f}8g1V00ipS4*Tq<93Ar4Fa0-n4R{l7}UGwMC zFE&6jcbu|tc@vP=9)RMx$D)bz3go;YIL#ZD_|p&+-RL5*DsprC5KDJO6EVGmHnp=|Azcss>053gPqn=AZ4;%KmIML5t-FT13?e#y_)EOxl z;a`LJ8;h_&nAdj!2HDWIwt3gtg0%Y{F^N< zi(gd^RnT=QtnAC&c{0o4R8d8L zBI)VB;*bQF&mxaqoX5^@#9*wr8KchoE|PJriYCgfq@?#CFWh=5>7!6$JDF_!gl+0u<4UeyzQ+39f#G&8qk8*m zhmoH)mE4dfOrj^?OVORBs2XDo&&vS$`;Zo4KW0G8|9=-#lsA}pMBhr+#g>{=sOAQE zUb;J*%PTl^%ke*PM>chqTzd41Ky0fcB`JX4I?qo@6yNFVR^7~_dq z4=4=Zx^z(j6zN$cWhap2ES#Ys9VUyXAVO~2d8_bp-_;~c?*!Dq4bf|`EAGu5hc88E zQ_+!7G%{6M9tQUZH%i~l#N>Vd1(;V;T<#8m3MFg_xjTh$L9p3HRbZh7yR`k!rnhD5 z?jELL`ZDOmpsESL5^zrn;CPuX;c9Y3Ga^oe5dlSrb7`8MyjevXXsw9m zHBE(=m=?uc0b*iKBIYy-U2xm@^J0uABsEHU;RUjBJyZtPocCX8GVWJ@7L7O0n=Ghk z@l&FwW&2Qget0GF69#V|TCet6|wE@6G&Et`zIPAoz4 z6_nfwyIcacRP?BVpN-8{?MDYo>2VLM>u*qrfp`^mWj{*TQnErNo1kQ@Ey1?4uQTYt zd*RMAF?miC0eUUENGz2Sy5u;@Vec+$g8t5KXK`tWvvh_?Su-#ddF$y_lcItr-XVE! 
z%xbU{H$_J6kTwU4cSY))fm<~pcSW)FyVwFaR7fU|1n#3i&Y-Ic(?gu8gVhHFtaaUf z(=>hP(^oC5&|G2!kXF1)z!GtQB4~ZZ1|9=qmiOU@TQl;!Hxn^D_ZU2Jo=;BtT$3pi z=lSkl#+YYVkDYuGNnH0P7b`hA|CG(Nu>6DS!XK=f0!zewDdHv&QM8z% zDN-#HCm;@KfDjXIlyGbAekLg|8n8f~*cQhUN#gw}aa=4;;^yG&2~nmGFmZW9P1$wg z<&|%Y_)-FiggubL*nLE=ag6LbsXfHl3D>84kcr8Qiy7m6u40P&!Q`eJr=0Ak#SDU2 zY(~bdmV1bi=dNT;Nh4H7mV}2=0!@;b1_j$$umyN9=;LVB_|rYiMC8>#H5tknOwm7_ z^bAUh8+v@|=wk1UdxTNvEm)1oA6r z;mz=etmY=+-~<66LStqu0Lcn7d!qRmCo+CBDa#@47&f8B?24PL4H1ptKrm1O3H4!RYhsV-*Y$DRwoHK$@ON zP5dy>U2+9vH?|&POGKL~z6O}0;G919B-4@?4IiOGw>}Y83Z6^_99oKCw_+;<4%&o` z)^JZT33&^u1y<XvuggW!w@n%qKWDw+)Y)sSj{|48j@07vP>|+Rj1o$&J^5 zCpa6M?1?ZkxY`W668~$IPI{kCz3fyg*@B9bsGecE@_ge0@-?Q5n1jerMgL6FbFLqP z;;>lGT*_hnec+yD)Oi-qNMv1#C(>t=bQ4HJ_Jsb%o%GKO}4E zB=j8=noLkSFy1ghd8dh)-czkVK?#5&d>0A1Ov*RF?wPZcidVSg)Ofea$Qw3<>gz)n zQz<0uJrw3w)04cJ=wXKLy{011ppx0(Rp=rvrvXot?;|D0ilzQ=fd6rU!hcMIA@(4@ z-$?VqjRX=NeGH;#KS0`Y=Yi_?o3de(hs^5kgT{RjoyQ2Q1bm1BxGGQ_>ND5GC(v!T z_Jpl$ekO)kkC^u5!zSmf*m9+ zti1L-roP6&O2KESz`r;p-8fkjmJkNVTso`mUbF2!YeMoC9JM~->qZwlC&5e5=P2k% zREdlWSt}S2&02AvH!XQ?KY@8Ix)_Hn8DF3bdR&TGO!3Q~nvTski#?xF?u#ZSZ)Lau z@^XsH;#bNbQD34cn##7rbfJR)&kz{$WmA#&Z5xIcVOIsBSieG68m}DfVI#$}n0Rt^ z*rEMZW6pCIq`O)spo;x#WcLoxrEdE`ZQ%dkd(wyfy$G29x{>ESb^!3t9DY_2Mf?pB z#}A4_0?)$05W7OR_cx6=&+|IwyR1Np`CDY>AlF-6o=4Bb6iUc!aR3hI-npxbT-N=z zk>_;^Lh{e8ixF4}_zndyFv41e{}HxIFZ5Rtj06onwHh`{f7i6+**ON9HoPem()B&+ z;<{Bbal=5f*L~ka8qi9V|45whh+VT zviw|f=vJ`>aW4F41+D_FTK3(KO-r8B1Y19DQ3Xdlzah^i@{HZB%P@h#>cT?jfcvcx=h=V)4D9QmOU~~o=Z28uUHb5< zHsS5%N`*fIatmX@;(^EIWC#S(^B3x2YxKzhk;o7KMFzbU z_g52;_uwq=>%0OaqQ8-dbD@#L3H*rhccYu`B`~LoKQ_S=js+C$KS;}nIfm_Hhz&`* zVbQyP8e?8;MH2U4MiSM(NJV>)|I#+Rwc5i5_N4#Y*z)4@61EDM2>&C(bTZgN3cMxv zUn9)(G)$gcdOUGn00VkfS_F9AhB&Xrr*v``M5&N3qlx-i|1090fJb-6V7}0>syAt` zVM=y#2>1)_;!qN`DZJRlc&Z=PgK#S@;_ZEEQLw3bqsrHW(Ylz$6+A%!kCY4zFsO&Ys#* z3~~eU3uPi{yc#vKiD1e?LvFiwF2b_*`MbhN*jG1Ac^z1U@-1XCtQ1^>3fMwY&o~Aw z&U_9t?mSoFg0DRwiuQ2Qa-dH;5ae-(Q}+mCJ=k&AF@hk&JF z6E$$Tr8SRMx>Sq*;e^6n$GA7+AFSVb#kkV2xauWHaS2J`sq-ue<22Hg!z1%6~y!^lAf1(xV;NIL)KD?!zM-5Aa&EF9m+ zX!FJgtrdsrpNK07H>L!B{n}GH1Lu@CW;f_8Gm|j7i(8wLybsurycD~t;!44BRKPYfK17c(7b}f5pL_K-rX(-K zK1K9$(py?qE0IXn@sz~@CE3b=E&*FWZfiR7Mt4Dk3Rw<;3Vv&Z#}nlVq+}3G(z@qX zvC|C;J3Pw?ffTnh?z~kt!}8>I#qTCXmWta`#i%)jbx#~f>F!|kc^#@OD9^5oU`_=@ z(cY1?+zvAN4Pm0qsXmuHkbrFH?qpi>0$ft$b(%wkOfomCcFNgM7F>UseX!eNit-x9 zgzPH96X}U0N&#${)#=cxpBG{08tk!c zCLyn%m^K?K;Y!1HYG4eMACF4Y*b#T`V6{oN^1^D@on)HkQF49LuZr!fr+RY=p~Rj{ zu?(a2D`?n4kMs=#WMV`Y&qJY```ivwmiNL8$V;-T3at3=Ony!jWWz)s8cy5vE@HXM z!nWbNn3lU|)zS#4*EDi>43+eqLVcS+mUKvdV}28BnL`ch1Gm$(pZ4 z8D4r$r5-L41KE@u&BxBLv$a4V!bjOw?VoDr}JLmrr=4ZkFokHCIel5!glN z&}pV7&s8N9UqiY`AVqvSiQz)Nz#LY4x^3oGnw^O?f-O4Xy0QyM#~NYb6TT}3smz(a zeR;Lz$>8Y4nfAUuTyXs!j0io^ZaxuLIv7@pFjN2kJO=3u6UpaYzo!W0eHB)qM0F;q zXziurPTA%_^E|-L-t95=ylB!C(yK^sjbo9pvnY(tt!l@HHo%TE|$se*Y? 
z)thAo$vK;HE)6-Fv+{M^&Df92dL1K^Re*@#?j#ruz(c4hHVrPhdl*%oNi*5g8xh5O zPx5l^5z^n0#?Z9e4X3*W?aXel{IHK36m3n4y4bFkD|K=U;L zc^!yQ{43T)0x9Bo5_23fcjH(O=JH;QjZ3a+I;Kxl4QvCx%RnJrE$X60wbE(s#guv0 z{|8gBF1GH~mwCQ;+r;EWP(>tPClgV{zd(L|fMf*PS~lvub%qGyiN^towMSOYGO=fW zH{$qq>|!SY4^Dfp>l`0M`qXU^CzB9Y2_XDa?CK6C%zGP`pF(vT4SE!Q>cWU-<#B2;wh~?`USwo zV4u5>@#f9-CoDY*Pn`E9Cwl}x2(9fyE@2!hKEF1^YUlloGOu~3VMwEZ;=I2&Wr5Kj z=Dqy`_WkBpCg#1Jd6ME5|_(c{N)fdL+KTh7|1B%oY2vn;4>R7| z;{N(wZ~Ep9M+^@q1D6lf{^Pe)br1ODUXL*DyJc~YU?yMM6eg*AB-PO-v7pZxWmaIP zvmb*agGt?^OjuqXEraud>}o8yv^<(xICrp>NB*cq4el`}A+Pf>6feQ9%9vt)te8tX z7rA!_!M8AF+~bTn&t?~jC)UO8hASXN{CE;y4v%a(L3-^)c%GkNjMF>3^|$bOJkdRo zbnIl~=HzgcafnZJPs-%9fIuwzTikT`UB(mVlSdvA=k($cpOQ(p{v&!k(LI%P^akqF z&~Ss!{}i9s|7k{?H-ZZ&`LwF2qJKK+X}xHwwl%J*E;kO;aX6@ZhOy`EbQVr%#FZu5 z2rBYt>e;>FS6Qhb$#s!e)FD`f53Mg?U;MscmQC}dc|BMy)9OKWcd&BYc zc4Zz$k1F=(l6}Iu55z4#aRBGZvK!VA&M(8WY}33@RF`6wkHF}&pr?Bi~4jf-LbdbxD9Yf+f+i@%fFL%mHT0t!BD!nV) z$}uxDGcz+YGcz+YfA3Y#tAU-_Z6){n{;)N@(!Temr>DERy1JSf2X;+_m%Ea*wP!(^ z5*3>9CFvEE#Hef>k<6{zyHDPIEGC=?JAb7MxD$Ro-rUImjK2ydwpWpjYgiRWKs=`c z(e8VZ*7ns-oVL2^Sn(m09T!Dpsd$a<`IOZTGN#4*$J@`~{oxTG(;Sl;!b^qwq$j zOna>MZ6ma+5}5ejM7|-HfWLp?&CZ!NXKHW>IG}jnLS7oF3NtRH{Iv9&?P;9Y^H%3i z^Wh0D-gXu%TaPU%Z=)3UEu}372>n5x-L__!0`rRf9I#6i9$k#?NrS)>LRMLxJrK|(Z5Dirc`m*FMo zLlkrr1o`?v?P5dLp&>jj=0~v1WcJ+;sY-gs=p;gE6RIa zLy4Vk9iV7`Kw55UDxQqFr-RWLo;$g)I0R2&WeZ-pV~@IR_@Qe#GpW7>ZE93t+OE+& zlK7)_OFTOK`2UNaC46+Kf3rRHomf?@CCoL9Y zh{Wn-=u>)V#!yTDpQwKW^kauN0wy^h^ec4Ymi=XBFAo;~vujOr=Q&Iq)bpZWmGC9# zFBHT%YRM(OqsL9;^t8%Bz6<*Z`r)rGYILlZh8G8oeL8~PZPrTA1`6r=8+CC#%a%dm zX&l78!+&=LX{L6?HNmx62NTyn$aQ^itz@Ye6;>Ogsc>v@d(aR6bmp`dNnm#CD%2rM z#=j_o&fe?!x7Q^_lMHxr7T!8?yL#Vm|NK)fA8mmt_(uyCjv-d@Tg$pzZ8O-jg2O+?R4G zY4i7n@SGe`yq6{~U-7y<7~K8GbFaf?oIWk4*TJmCdI=6E?HUoZbX=A?EGDg(sU6J% zGeJvIc{!Jm=JRbz)Sg3%`tqdaY{fmVs3vx89lRP>aN4xlN-=J>22Y$ zxTv&gRc-YY0L6M$vhtb8t1L${*VC@HPEui51*lK}6zSDT%5jEWY%z^F+Cz0w@fxln z%?c5uX;49nio^JlbWKX)#HhGI58+#ICWLFbgfyQ@!}h$0CcgKGNaV;cFI&v|X3l*jMGp$lCs7t`hi)R=A!kN^|$rohLgCD-GADh9jUs z^Kck02XKZa&l1=Km!MwV!j%73nDXDiMV*cBj(>TDZp}r7Bq|BLp(?8`qQuuQ4|ZK? 
zp>P`uZifj3%i#`!cR~^kRWc-TLX+W9R(T9ou*ynU1*>5<3--W9f}5ZYhj0PZhX(L4 z3m$d7Tht~sL$on0FJQW zD0~55!U;GDM<^gi;V*)JLk%LR3A5PcY^YDL0kme7Hqek@BWMgw;8M5@9%i#g;BHo4 z3~M-{YoR5pw1THu@ClI|=TB-{5!H3;SR*g=P!9P4FFf7v6*S z;UIhfAHpFx3?ISAa0HIRC-5m8gX8cSd=6j0m+%#Q4d1}GZ~{)kckn&@06)S{@H6}Z zzrt?V1AoAu@E80IcX0T3LNyMsI-CJ#LJg=1(X0{!wFsUAwc%Vi56*`=Z~@eXdQcx4 zz=d!TG=xUb7@9y=3P?A&nBXPQ6q>=Ma2Y6YIYdEoxB^<@D@ylDR3=Jg=sJyu7m602DlM!f}3Fm%!FAm8|J`V zXv(2BgLwq!!!2+t+y=M90$2!lz@2b6EQTep6z+k0VI&vMC|E&oCEN%1!x~r%>tH=> zgiUY=4#R#TH~46nee@EW`hZ@^C21-oGn3}EM}Fp=OS*bDn$KOBI!;T?Dv-h=nyAbbEHLQ7}` zAHm1)34992;5d8+pTigMB^)Ce$Ke}--@*wv3E#o@@B{n^Kf%xN3;YVd!SC<~{0V=- z-%yR>QytELGoc35gtOpms0HUhZ8#UsgY%&dTmW^U9(+T>zlE>Z(bsSx3oe3&&Ifog149WEt!87Oc$L_u@70$M;zXa&&_1FfMA#6nw$gLcp!IzUJ01f8J^bcJrv z9eO}d=ml3oJoJV>&=(S*A0$E&Btr`HhXIfZ17Q%PK{{kWCJctFAPce~2XbKu425AZ z9Il2DFcL<=XmG$7vNI3H5X^&oaH;>|GIc=_K{t$raWIVq)8PiV5lUGw9(>@30F=Q5 zD2F6Yd@@WVI0+`h6u1_q!Zer;*TMC01KbEV!ObuOX2KC7JqmLO&V_j}A8vtLVI-@J zf&~N@!X0oY+y#r^ZdeRUU@6=K_rfw*4l7_K+z0o=Dp(C`U@feJ_3!{}fQ_&THp3Qp z5FUbu;SqQg9)qp04IYOl;7NE2w!;p18lHh?;W>C7UVsFZdg-AgUH{1`(VIHJ~P( z1!qGPt2BoUE{IGxj|JyL9k>9lgX`gKqJIZ2BzO@tghtR9n!v?y2{eUfa4B2{3S16R z&>XIS7SIw}!5UZ#t)UIXLR*M~cF-O=Ku72VouLbKg>Debp|*vd1be}i5D&ef5A=lu z*vhHc21x{yVFKAy4y#zO8V0gp5TrpmWI!eihN~b8vLOd@VF(O`VK5x7h7m9lM!{%s zz%}4h|HY|rLOwwk6hI*qfg8rcI4Fh^@PHReVLbT24*@8HBSe1`CK8+klVJ*63sYel zOozHq4{m@P;U>5lX249C1+!re%!PR{A8vtL;WoG(o`vV&MR*DBf<tcH#hkWR3U;CgrfHo!*M1P{VP@Gv|AkHTZH6}G|S@B};wPr-KB z0Z+p-@FYd*DR`dX3-Bhvx8P-hufS&50c9n17wSQM zXaEn8B^#h2!A8&+n!v?y2{eUfa4B2{-*M32LoJef4zy%JD~N^|$bxL>&MG}1j$k`z z4;`R0bb&Qw`&#Huum|*nUT`JELvQE6vE24bNt#6dgg3|-(#h=<A4McEHo{ z3_J_Z!ISV5ybQ0vtMD4^hCT2$yaVsTdr(Z_FM&@8ehOc}m+%dI3n$8pv@Hf;Y(t1!I8olKKN9Y8dp$l|{ZZMW2 zKMwj4OoR-`gsWi$j0OkfgA0nl4bxycl)`ujKp9Mca+nB{;5xV-Zh#x%Cb${qz+6}W z3*jzU1oy%HunJbg8dwYKU_Cql8(<@Bg3Yi69)?F?6^FkXwi4V1+hGSh3(vue@Dl8V zUBa;H)v8q+I(9eVJ@6*H1$*HDybbTc`|u$gg2V6;dsB6u?t5G;gQ1ZTrMm=CwWt#BLM4hvu*+yQsOU9br5fqP*YEQkBxe)xkO z{RwLbu7!259tLs=fI-`AA`r?3D^!h;AwaU++^KYc#+^s@ID-b z58y-i4!(zv;A8j%K80g&96p23;VbwWeuLkk8cC}TuM>O&Y7;yc&V%#eT^76tbqUsk zhR_IFLMvzr&0q}VK{UibN9Y7yp&N9E9?%nd!4WtL{U8yNAQ=Y2AV`CBSWAYjgTVx^ zf-J~}VK5vXPC&6Tx0@uP+m;p0k0Hnfg za66eKl!t1+;)G zSr89>ApugLKMa6WxS6EQfE|i) z&V(9p4%CM8p$=RCb)g>Ahfy#ZjuX{q(1hT{a0xVp7SIx!!xhj9qMB90n0ggLKG%_AKZCR}svDY{-E-Sa2r{B{&SG5u6Ua zNk$)VvS1AS#DbsUH~1aKv%m*_2*4zm3=X&kro(k`BisbH5dEz%o8TOn3-jPf7CZ&} zU_UHn!98#|D058I)a1367SK*u%k58j7^ z@Bw@Xhu|=L1Rui@I0~P@w{QZE!)Ne0d;wp=S8x)(gB|cRd=1~g_wWP!2tUC^985#_ zmEdpiJNyBE!e8(=RAap}p$5Ff?_P$Q1kZxAp%%Qxg4f|(g6F~cPzNr6x=;`5Lj$-F zo@2A;p%KBxLfP25O*qB7)Ki?*RDPWe7s_wPUIY!H5j2J-a4}p011}Kg(!gKIEyZ|r4OYkzh0kjw;S=~2j=^#G3_gc1;7j-lzJ_n$TQ~tH;R6!> zA>2VC?}Q&%@FQ%bm~Dce2>uKc2~L9F2>uR#z@P9J{0-H}_Udp3oC!6cCY%LlLoGN5 zYQwp39-I%;*wJ*TL+}Eq3-#bP68<~XC)fZkgp1(5X2Si@kYFPi2!o(I^ni=u5_p)y ze*{*s%hiy6z_kRY!Zer;*TMC01KbEV!OgInQ?UnT5S$5L zklZg}7Qxvt2j;>&m=9w(@p*6y!CT=rxE&V2LU@^jc?IqwxCm}zm$$UxU}-4cH00U^nc6H{mVV3;SR{9Dujs9e5YsgZJSe zd;lN9Avg>l!N+g}j>0GKDIA02@ELp#U%;2}6?_ffz_)M$PQrKaJ^TPa!cXus`~tti zZ}2<(0e`|@@HZ@@NG*r6$fmR51i_O~i{Lp>8_tFE;Cz?|^Pw)(gZj__8bTwu7%qXP z&&;dF^C+G}apeuBPp3n<=Lm%i1 z3D6G`AqkQp1^UBos=htYluNA{WUwF;2E$bl%>@wyIRtaz9*X?EFpS`Ec%2B|fV&7T zf@{DDV;~Rm!370S2u0wAu`mvbp#(hOg;E#~KJddZiokGKLU1Wegh?--~zY>ZiU<6c6gL5c?|9#cqfd6QE)dbh6zv(_rSgI zIGa5ID+sQH``~_91*_o<3iX-r0KpBg5jMeQ*a8p2L$ClAs=woOKLigGd<3p0I0Cj3 z+y;-s6YwNF1>0cEu4Up z@Ev>)%V0VD06)S{@H6}Zzd{iw$_*ay!e1=-8>*4q>Tm{}2{oW5oCRk?EjR~i!?|!C zoDX&20@zFR`=CC-25=!<1P!4PG=?T{F*d>BIGFcfYfcq`mZrMeiF!E(5R1$V+-ut;4G7Q+%)3Qx20 
zGf<64t3w7^k_jtVWhLAP_ro{vE!1G;ny`-GdUyafz*#If8#WW%0uRDN@Gv|AW7v5f zyvin|~{0zUqudt58S`U8^{1g6yzo8lns>3vx4mF@AoCRk?EjR~i z!?|!CoDX&20;mi1pguH!F^~rr!bQ*!8bM=d0vE$2&=i`%rEnQ2a5+RlbGVe#cp0=L z*b1T{23kWKh=sNg2koFebbyY~2|7a;=nCDSJM@5_&e zGzzXGcs<+zH^NPDGt7XQ(3^^)56mGr7ha{JcnxkLcqX(K87Q36h47Z;TRl;&){=-lPrD5q^T7;TQN7euLlP5BL-Qg1?~}5mbjW;7q6iHQ_8c8*0HhPzNr6 zx=;`5Lj$Nr7FUN030?#Zp%FBOCU7xa0!^VAC>+-1pb)$qqM$ik0WIJjxEESND~N^| zXbo*37TQ94=l~s|6MVqoe+b{f_t2dMJ>Wa|9(qDAxDw)_H}rwNkO2K45t1MoQlLMy z=CInpK!SrH4bou^tc487gu!qXWI;Cgzz;)UC=7$)(3*>+4U8l>3Pytiu4F+xj3Jl@ z`QU;AD1;(#tG{FALMSF!0v_-}DU64CtTG=01j}Fol*2@r1UJBqFd3%6wJ;T?!E{&$ z>){Sg%AIf%!JA}!QHSJ zmcUZDlnV4RSVnL;L=%jG`v~3-t6()06KM&|Vn?�Tyh4jj#ze!xlIMhhZ$6je|!C zJ_cK18$1qAz?1M4Y=<52G&}>(!gKIEyZ|r4OYkzh0aT#e#F-br!qAhX!yVTm%iF5j2J-a4}p0O`#cF!DY|_6oQvS6f{?t zLJMdKtzZsGn+rJ{OfJN-pe@8fJ7^Cbpd)mG^H}+O=t{5~Y-i^?peMmz@GimkpfAA$ z=m&{Vn+VQ@6oUO>0Hne|7zAmM4jGUMgW)R3f^5ivTu9=CCc`j-!{KTe0a+}_hHD5q zp)ncJ1iC{Hr~x(MJXSd$N?|n}Iyan&VdvFKb2~Wc_ zun{)FPw+GR0>8q8@DMx=3MxL;Ggg;JO_`%6YwNF1>0cN>Tk&(yNqIul$zzuW7{D{^`Y zU4GwNteacUF~RLB&-3QH93|fTvSL}VSB+#Jk>qxCmz4UvvSpmh<9C&eDRw#XoW<@j zK4(Cpiug<4ezqLe^hC94&6T#T+bR;d+f&$6DGL;|?aps%%kTf4KN4SbVvN$y=MK32 zMM|R2TkcU3y`__U?!uyg(y@JqZc4fYD?XRs<(uHjmtV_* zELXnUAMm-ylyQ8D)03~1`CW?JqxijLzC4%un=x*W(>F;e@cK&pu}Zl+P^5T$>VLgu z0f|I1(k**ZvnW<^`dmt>%U9wK1YG$Fx#`ZAe*}u00l_8F6c>BTC5I%VJo$16UXNeh zNX}|cHBqfYlI2&t1x9)}b4p2>KOhGZa7sFf(m6&>nM7ygfX5q<1CNzuZoi~YQHtdt ziNv}`IaGUcBq#EUo$eAj`4uEXc4x^8BN=jt`DKz-m83*X1gSaQlyD}9H{Y9AR^svm z)I3&WlT+1B&VpB#`jir9z~ys0i~Yg;R!@_8UxgT25vxyOp@YH&ilHl=S4R!~qgdLci3s)STh!9#c|tGT2*+ z>@GnWoRF21nwXoGkfjXH%^IAUO=KKgQfhW$T0&}ia#Eb6K=!I64@u6*QL+alq@_hX zR?=@f(tgC1mYUEnEm_-}9BWc)R&rtv2NnERqU4~YF)dcf9-N$*%0H5aB}+~uWDSp% zSS1#TH#_;NTv<&PD@h6I3H_6^mFQDsjhvmt+^pnu(w&(i(PZcL%g#y7$<0Yt`e$Y) zsac+#oHZmhF*&=Jl9tIyNKtaLlVfEcISK5XQ!H5~i{;<_aGS*en~{+ut$)zAdluap?s zx2(lsY5%m8VaaJZt4eB$l8`h+Vv%I4D@$Zj7P3?IawEUY&P^PkXM9}ksHmvgIaDuN znePYc%;Gb*EW8USKuh~NL55?_=oX(w@)g3ElSzXX*T2zX#ler1|zod&i7*|j36fHKz5}l&PC%uTQ!P&BZHcoUF z=am&piq);P(?xxr=XKGIa7*LkrwgEHXU8%FuBhQ0Sy9U27-^c+=G>AtwS!Ua zmJ%;bQ84}au2N}_l(JHZm)%Gn2Bf7_V=F9k`Tcs#dgiGOtRh#EbYbZ5$YGV4$C}}y zQEZ#;a^}mWRp^ygsi;KFl#cD&t1-Hz9af|8(-0O*5!X6BdES!JGOhJ?7Avlak|w|O z%*gl*+DEf@)Gwufwk7UzDd2jFizg}4;>r~*Ns{iGQbZ!;Us^^t7qv4PU%q-(p*V*PIQ-)l_-{@u$*x%YqZ?3`bw7?O~d%RW7&aRtR*XP|56 z=pO2iexXU-tT-I($l-|gyNV0+tC+ITTuG7&Lb^93PpTJsxRH)oDJb)(>(L)Ca_1FE z$EjE=(^@ooZ!A`hfZ|Ua<`rPQ`i#xxSbeVXayiJsY8gsAq%O{Sln&}IIkqWkhOi1h zQH9Pj>6AG=Q9ZRysJorTjyzwf-=S~r@@X}pwuNjH@W~;lWifPXbBQBjJX5uENjBPd z;3)SwOQoP_wYIS%jv>~#h8*e@A4`dI#j;(E|pR4oH1yUZ-Nt|lkUP3PZI;i;us ztE!2_T7TKhkU%$E6mW(@&dm=^0IydiO*cDTl$>&1{hJSf!n4 z&9Zv>zl|9v!&6|8)IW#?N^ji?KVvsCdnYTM_M?3DV<46 zPr~S0N~1$|2KPv8iDf?NF$HMa>{%@N9v#K{Q8DTeM95n9yC%)J%%;G-TO?JS6fBS06x-4fq0%P82o*QK)^j^j5n-`Wb?divKA&@vRIT_jdo;?@h_)R@DQ%5o z9nrSqs2HW4E!et)-Zy76ex$P(EuAx`XHxVjQ_Xlqcr!yv`kh78OR63i| z=a6$t&dR`NH1(7e1%G^wugnz_wzm?0q2xq#)JRW`6zI?nvevKHy7O|g71puVZVN4` zHvSoF(X>@6}OS`A+KK7LBhhLs-SMhO4qV?h@UA%$< zm$X!QE;bn{tx}w{%hK@*FPKIa%YRQ*y5!o|Dntr#_(f%j$y~4Fs91`Td0ki*v^D?T zVN^_bNixPz;`m3jlq>yqguMiOleCcpW0%%C(qZ;D<11*tMp)a?QDzyY?kf!_gItr6 zeLm?$$ys*!d?7p1%XgBc4;{P{k-Vldq-+G+b7@8^IBQYbek|3ftaz#uWlei*1qT>Q z!b9@Z=XA>uw{dVG8QNS)_V~+q5E607hl7%Cy4!NAl~>6KQFP>3EGJN^w8-nk$)gsz zBZ}dMe}vYd8f8BNX8$r60b^WJ>x3_fV~O0=IvI?QwwE!r+o}$a$);K#wvx5YxTPm* zY-IgiMSF4;SNTx2-eAbl{=-y;<&CtHk;VkWs{`|(%qZ;HW-hN{MwnNZ{nkt!ZgA3u zN81+2ja6V$sVm+XjOf%cxO;QgX41mWgndbzRJsxwji)-Y5izm~jzf*QvIxS@%qchV zkC#!|Qn%JsAVA7W-7%4NW9FOLr7?QY@p@x4Ii!~nu?`F!Nid_O=5+cB{qfNiWUD^F zpdM7Dy58K($Uv!#wBw7d;M$I{M&R3(* 
zx-V2s+T|p}lRR-&`;rlxql0e8TppH`c*y)v`Q1TPFsGyCM#U*{hs?-)PsV3SaO%Tf6qQ^K5n;Y=Z*U0V4Ck zfzct<2RqHnN`1N@mxkOWX-97xZL{1sXRA*r)JMiyE~BqzM$_LT!#fhUJe`eGWW3i| z92d(7b(i2xirjUo(>$c>Yd$EE!L>XkMIS39X}Rj70BPIRsUb?bOhzbHGRp!o@?q{P zn_+&olukYvQkgkM z+)4B~q@4Vx2J22I9hDBqRTPm~NTfbj$J!LNJ7gZT(RlS#DvBX{d>JEIJ}`ye5g;j*Hte`^L|sSXIa}? zp;fuNDMOVZeN7oPq8n|y1V?o2X}LlA7dx~14!9pwdwwHAb~S2L8+&hXM5mskTDR^P z6DLpOq>74`*3Qf(kE>jIM8#TXN(Qm~(OM_oU*s&c3`|J7W%Lqdzp_DDiN_%i?(@b) zN9$2oo}rip)zYq(lxh3O_XZfm(KV;e#@@px1?3;7+Nw_@03Y? z$ruY6r1sKF+;XR1)=oXN&;cnkb!z{}d~j*zfZiX`Rtxq)q)=#&?IJ&%3pv7n+q1X- zalMhyeE4VmlAI#1Jc*WxIdU-s+&;P|X>m$R#oAw?qtc;$uqC$lPK?_Ib#k7}s?6t^ zj4Qt<0}xs#MWzI~ip|q1lk0d)!&Do+lQL$KKEt7i$AMmdo-1GO^Q7Y=PwD;A_sN&> z7#SwiXWelD@-tAThw`*uA5_osPLhDx_2J)M>5&B{1=F1^Gyi02O#+XU%$-Z$Utdu= zJl#4)chovOow{{s+o}8ib$7J9R{r&8lNI$bB6Yj|ue+mW`+w83N%KnmzwS=t{?z|< zcm8K~cP6%%u99@_D(*@-SR>e(Y9H2_l0KG$+Z{)s+-qIc=mvI-kv`Hy%XVfbM%IYb z#fsKClI<)VD@S{Jz)EZLI^$y3x?sWHm!vejN7lv`i*&^#nl`p*LVIi04xL2V*1oc3 zcjiI=)4m3sUy3*5siV^59C=NQ$`->$giG@dCQb? zZ&`7^%$FXgO+UAuS$$B@Ba;UM&T%qBz#TG)jMzdCD>l+8(`J&63Db(~jr4Cek7-~_Ai?Wv@vapgzl3KBdP|_?pt`%NuJ1(#PhQEklb`zsp zU0Lb&AE>>o+?9+wigx5JykSO zz@9pzORLR)(BJHGQ2HEN*`qph6|3*2$z(IH)<;t>_ju`4xhyvn*2W>l@`{7@wiB0_ z`mUAU@z#e5ooX|zKD%H&iIF$Uw1sBrNlwqqws)jtm@>Tctw(9}s)Fr>8IdtRSI;!5 z-h%JBD49|Z_{yd2pv$UV*Os>@^n{vGSub)+|1HVBWsy@}xKd{_<`sFR6BT^Ko#HNJ zN?3Qv7W#x{yk_lV?e*AR_?9iqwTl9QQh(2O?OYQl`s57hTX<)!VW6}l$K(~5S|3zP}; z{&t)ShZb<~X@T~gI(CbbXAxS-<-)DnqiFRENO$QpU*S02*CfoV;q-SU-S_|c2he8v zZ1+C@pLQcD9sHb5og-PH-^rULlqgcTt2gfGZ4IZ)qvz(qcDGJ-X4{+=Kdk;x@4jgq zZgS*#$eZ=t@OVk>91behFOrZ*Wi zi`;ur@mlJfQ;iMl74NCv+p=Y(Emii)JD3Zciz-e8F%OJ(!VE~BR4}16(qvT{3v*NU z%@3VG<9t=68M8E5Y;Mn;cx6NfdH58ov=>7j&Q~#^dS5s~TK|huI!c}l%V?j}$}(vv zTGDPSo5AP%X3nYeW=|)}?Dq~;zAFfy1wn>4S5kN~DNp;gS8Ew3k*~opyM+wOaAw%c z7u%@Kg*;>qIAmZ~?sTTf@w)x;Z+%{ZeR6SlKiTLLM2x~*XkKn16OO~&PX&rl1@3$$jkKQ^1&W`&cq1YVOnNR=zAFzGRafjgO1GM^vAciOad?#%Gn}Y zdBG(#)gf1cu|=WJna>;kOmCHk!0NnxnISPkjzw}@ikJG|QQ^7blF>waQK=-#SjD0J zPflmW`J>*f#7SMP53*LgCc8T2nH*tW14hb1{~MY|mA{G+ac=vFus9-}7Ogj|SITC! 
zW9E3Qyp5xshF~p3TL0BK@5CWRBJ$l~I5h3^4`k&N8Y-hVj!|Nh-3RC#C!IoxPgSAGm zA~P3~Uy-*DEmxIZIj5?5NRX`a@2P!=til?Ig>ok)sntqdNt`o3Us`H;eBty|^3s$o zM@IDI6D#&wcT`+X)p`UE3Q6`ZckM^6TzI) zM@?+I3Jzpc(9Mv~-gpWlew;xcyqBRV?cqpp@ZRz(^%}G^U)He)ZCZ8(Pkby-dfFKe zEKfCaTVD9mW@>AXBaDXu_7Mqnz%KY?$UH~JVHjVQ;jHJ#?MDfptCMR$-kmZAJfvr@ zy|y85gavEj4slAl{)Iz%=UKiSq~4gyFag5{YR`$gnTj8Im?@DR8;MZ|UDQ03car64 zmNfJ-r(TwpXfbM!fn)&1%Ny?2+n$bbazExRmDPOSvce)oJ~$x{y7U*?)r15;gCw5| z3VG0_eyqXPyK}}V$rGi%mJf-rwLXp(OsKrZ>h@|Mh*MWEa@brWIEbNLPjcHLZ(_=; z$B}YHZle<9*__OA<9UIy??TO9Zdpp58&_ z#aUj^S3i)!5SuZ{-FE!)4sW9IWYxIo(#Ha&_iwrNH0}yyj8D!!g_3Cj&X5dLZsz4k)Zx6)(LA})2#L#nXD_E+USS@r?b>)1MMeeq%%Hhp)ke8O>oQ{$O8rEM zJQU@>VP9;>Q7>m1z@T=peDWvsVhZN2Q9xy!*u28^p|z^*iRZJD6sCcP?ll;9u$XDz zc9dP2*}=6JvEXY3>)(|aEjIQ08?tx%9?i6zYB5Nkv%|GPZQ2fWCz5p6!6q6`R8_E$|VtAkBgppVXKb-4P80F1+NTa2kK$VP9l82 zhE;-3=%oS5lMmo(l{-l@Mx3Q_RzE~XN#JE)nHV5-c4)1nrHw>~UZUCnE*vE zIL~FABtBXg$ByHS!w)${#v%Eoe;K}V^a!@~huoa27Ymt2pM&MW?gi2D6FEV0o^9!g zkQO#JUhJgIYG3|QZ~83V0(n<6@)J-tm!BxfUu~E|dnCZY$C)7P7RqeeBHa*}8(Pn~ z+FFE68gQ)`|G`|Y>hlJEgp>ZM{S zLG~5cd*rxmjk&&x**}Z2)RJLEG&;c*rHW`FB4$QQp|;1 zEZ-f8oIf#2e7w>=(v{3MMjAt|EOR4;l`JXI7KF5Y#>J8(yZi6 z+2^Ae7o7aJvNIJMc8V-zo;Xf%O`qQN9QGY&`(+yWEJ#~x%;k{JJM&P`;m)T8mZ{6d z?#b#6rDaCfDfUsp*{b4B%&R7HQfZBX!!}wCQI$gsR;3jr@6-`juwgJ}OG%3un`ODB zcF;P!_I`tUT=Xa+XF{C6SpH0e-@)Jeupd0-DT<+((7Pq>=Oj^#{lt07N;(n_-!$Ux z%<(KtKdWiJANd|xb4)p)Sdw_Rad>sNaTrtbmb#G6A0NdzOn-|OS_Q^{MWqY z)%Pvk2`$ruMUB-WABLnb@&bRHOUs<_(;kvy{a7t!l-&h0S1rfd(53Mfj5_+K4>K6M zH?z>5vHDe@X5eXMr_cC2ii5qy8@sW$jCx^>oUhu25iv7^*QTT?7zk#XHQ6CKBkg-| z3qFy?q?hp9R-+Zp^;jkWYoBf*tAeAb+R2udvEuvCC>j3r$Qv5!fUJ50E4y`0aJq|$ zJ1%0A%1Mk$m%nPGy~iX&0y0@#9Vs!sAXCZyJW_bn5p+dOUGRQ4VotLw`Kk02+3S;# zHj%AQ(Jr!zGzI+O0aviE#c%l@yu6R0K6OxU@%_O;a6d1bXw{G{XcXOC;4SqC_OnL!=5?nY=F}^NPTY7sgIM+SJ-iqzzWZ)+MLG1{EC@61(BMMQI?R1^f4Fy0)$$t*^(mnO6Ieb z(03o~ErrA$WgC2nQpepS(qgZ~;g_#N%27ocr3o)Iws_=xs0C3T(#tm`DxJAb8Q>>) zX$ zv+6~M%_9~$293gw-PlTlyjo-$mX~VzlicSe~kO z6Rf}Vtv)wTw7d_SB9)jigCa5b9(HA~eDlCWef%zEEqIvWRVXF4piGLUObC#2WK5kf zo}uuV%Y5V5mM@$uhK=kuep0Kde{V_}U42wt)(H*>+RtL-s^!1$ZDiwJV%s25p8EQ; z@jkB1Uocu_UU)ZNNj7ta!Bis}a^4mdJyw1fGE>9y_$tEVya~)*#Ad7$Tg~&$mK5bBg^39%KxQ*a>G11 z#wATIs1o;J%fHfPyHhnyU)I(x5F>otUJ z#dl0f^pX`nLK|5bWyyW($+TXiMmlVziWDf40{Q!m{53ZUiCI9*Ghr!)mO@jt_BSl~ zT+e(Rn|I^o0<^?!?vE2d6Kq+=Wh!-nI?+__3n+zJ%<`2U+ls+^fM70BAL!SeCCaef zMUGJ4bBR+XK$=-&O`0W3PP=AcJX0+IYPwFVKF~5Gc}%Vqx3Ep^RgYA0>JH_}g!-66`vVrinC)wG7Q!M7$zh|;Qlkx4GHe>T zs)`(8Fe@sEP_MP@)y=8mvY)reYX=WrAAC@2K<&0byT{R9|ticC;wr_eBn8 zfOjNhiKSmug(^?mv9$R_%Z1DblJq%rmRm+`-l%qYn1$SUNLs;c&Z^aZ>VE%I6<%nU z)5tYj^%wkblzF5SuT0}kRLf!OpVGDcfn3f)u*R^Q4SgrJNR2wea$~1X9p%Q^T=7(O zrnV0+0CZ#M{uSgo0$?nqV ziprsN-OxX;r(P(byR~NkM<6dr$xh|ZOBH)Ph3dSicrD%PW9HxpqF)}wO4rmJQH<~i zJ9sjT-j_c3+5Xb1)6>rQB$K(JkXhje|H!z!kz>mV{cB*Mt-2*4T)tXS431&yTbT!BD|x{d(6|$cbS&n! 
zA-j^jMOt0XTGdvx79GoRr%!Gw!Rpq&)?pd4eUD0RIs&H4MrZe)>DL!6e zz6Wf5M#U+r+AWKD?t@2pTK#TgdrJ?(6;~*K(8~BAF0W&nH6Q7c{)1(qIu#^%i(y3P zP%lc$4M1=e;-JB|Huko@jL{g%`;*p$7{3n26ngd|-P9z8HfZFzp4Fs;AIo_@MHI&U zkhYx?%rx8eB9TORL+t;b$1VR-ACNx)sE!VBe`s|4Yz54`52XbSEhWLK#H{v$XCYWL zf~(m&bpHiA4=EFWyYQQPO32lm>9KZdvaK@jAjxFiOJ_I z4>q$#5{xmi;58V&POuInpX8Q8Eq(d}h0qo=Zk%gU*t0;kGbThQ8{Gh7mwe{Jel!(N zXy!PLr0Wlu>2VBpDeItf!1k@hu$lSW( zvKbOxSZtx^-ke+~^9u{gVOs`S*rI70Jq!tyWt zX@Y$m(so#ppR$A=sF5*})X1kMbOSACEH~yxvYYd#58Q>GNaJRfCM>j{Fm>_`~436k4;8CE38-1o<@Z2W|a9|tMG(Z7E^_Vf{oO?JdG?ny0<;^zzOSTm@80xka9jEb?fK%>VO{n?^$xpiVwzjIfw_L_DM_&7yg#4vK zX-jCcWl}utd*w$E+?6FVy-tvi7isM$y(C_bIYnGjWw66aqEzIsFB;1$PgjKz`K#2P za&|&5J6qix-cx21%TJ|s$0@h2s9;2BIN4|9k9pXd4_1>54z@wozkGb^;Z`>1N8ao( z-2i>^SLj_!=^#AU0jd}x_1l-HE;94BTmdtztZo&say24Z zQLi3^qXL7adGg8r>ma~g+ZDB89w}LFlEkFU19GIpw!kh085!yzyN)tWKTOY#JWKDn zQ8$P!y-Uxv==GPQK#$D$K;YP6V29im39u<7= z55b{B?CBwTStd-dX?e}72lvxR-@7kQd7w9S-H2-G=(^k{ONuGetqU+1ApA5NoUkPb z2L;?SDrtiUFLcZxj4u+Rj!#kjb*>0~uB)rGNF+JTfGL z7tvyq{mY9S=cu&!90ZdZNe~FE2-p%!JTNm&=Cl=JKhDg(;)pd~lBhsY83xW0KS~UG zIPp?!o-!4fWDXm8#StB|%qNmU3{Mhpt{5&N@ypH{Rp%o+3zeFB`jeGDbM1^rr!x~g z`xErUumsi))m5nZQe-%-bptR|&xv;MI6*@XG)eIQvZQqAng3<@ZPLgv&Be%Q6ZV;) zo*axbUprYM6Nni5dK6}tk!%=NqSPMnn%Jn;PXm!=xun8^2JKx;Fjs8&#S}W6xh*1S zxqwT3EysIg$u8QnzWsru(+)!IQNPBEJZgi9I-TjOkCvj#;5>rgM1LXPyrNgnZ60Zp zb`lw_^EM-v6RwnXc1fu%31O%bb+Q?4BO(j+h-@V73Qi|>pSwtP;DKc-T;&)5n$`Le z_kh3?U0ZB1|1S>)yLy&wnP-DY5^|1GT_O(qi-SlqJ$)5^o`rcaw5if1L$@`j7_m`{ zTT^qbO`csQGkdlv_L`}bOfufSuq~mxEXIYbmPN+%EVrdJq3DZ)}>qFWaZ2egY zWB)=y?#$uHR}tQ(E?8sx5Ih`){gLV};WQ%b=R*R#7EbjOg`hLTZdNNBb?gj8I&BRt zS;uA9kz)b!5k!zt&{8uIFdIhX%Teu}LspI1(#E;U(u}GvmKL$=s<|~sg*2)286X_h z&o(2S+OdZI$E$}HL!tjK-DAv7!GB>Sg$|zcO2lY+*m8kpF%O+s2~eb8e1;A>7K(K8 z1B^*Mj%iH}+&GUl)z;pZth=0xn*MRf2!7W0@vWUc^j2u-uF|Nz@nsAqeG+YIw#g1{ zYY%9R4CA^v{K$Fok@qzrEI&Xb97T5GVh77C|1aiHN{1wq3= zftmw1pbD2Ub+4bNQPgNwaQ>$g)pW}$H1aZ?jVgAQ6vHY44Rvt&1AZ)s8yZ)38%by( zlx}Kh8At#42jB@j)o2AA+>CfL4ib8Zu@f`S&>U}AJ$_>8FK?BR)Xlw4G}e_T%NF4# zj&fj#qksekE$&GZp}F$pXtZEaSZahCS&(Bi1`ej-nJU&Zk*r9tk)T(M;eKDsFg(Kd zHtx~J1Wa#x=@4mXWn^1HhV)=W>SwvFH#+P6IJV@VMqwvqzX!{}j-rM3vjrDw?jHqx zo$U)$6ZP{rQxAR|BdnZWY^=bWh%Mnb!f~xsW-h%YSSYktZ1l}09yf9!B}8YmVv-WO z2F61ni7`7^Wa`;Wm_hk7vdaUjc614L;9mzFJ`}g|&bVzK#@tO<>w~fR?_h6heK6eI zPww_XYd9Jvs3Z>lWA^t^SAOEQlj@li_j4@P>Ug{zcv_PnidD|3Ux zX!6i73&pEZ?ZJ3!HC6CP55eBDNXA##xC%KCJXDT}Ih#)onjQ@>h%g2dir_x&xn#fn z?h*4%r!SyeMwKX7;ysB-RflX|krqnww`&VWyWr&rlvAv!BL>9EbcFD=RpbE`X`hAW z4L91JGh%Q#IQ$_>USrui!xAMfC=vnaLEDGO;FKCLRQsY|JD!IuvOoF{!kzZB(mKVPX%G ze+}|0>ll>HPD3;@k-*9-1-3#s(2q(GlTEH5HW^X{IE3JyB=!J8zdTfOg$VkX23+=ezHzmu`XINtuBd_y#&8DDqRW58I@Ex`xz}$I% zvQ7vpBL#)r${s>gJ@%kiB(c{JS{uJJ*~3Uj6+DfAZ|Tegne34;D-b|UqLobcE)}E; z8VXvom$5x3#$}|mQ78T%4_vf z^R6RV58pr)eSU#K{~~6*$u5ZFvI(PrewrOTJP1f=5uKPI1Pqo~Wb=a523p-VxkOu- zg=e!(#4*PVAgJ+YLilaz-*Ri%^4k+OGjywT`4$%k7xW5s+t7VBnlTzl0;LtbY za@Ru>Xiu55s-4G55}wIRaxxf>-KTD*75Z=zg*{;XJ6+21*u&;YIVp3-*+N)NYa(`~ z#^ZnE8CN>DMZnu$oN+QH-mQURwdtFxFmKQS}B= zdZu)1r`Dxx9q))b%uZWyAi7G`p1x;j9f$uq@MBm&tOZD$(BK5f(9-q^$yG z!Uv0OC{a-$bNt8pIJ4f|jyrlPjn-Q8%rWGlB*DzLPgdnNHQf_na#YSX46Gx9 z^PfHx0)n8$uB28ZjME=Op?kQOmzX#ahxx>z3PdX@QUpk&HmV}dIAnp#ilDyAsJT8U zLl>WRLWXt9QYyqoLm_g$Vqh?7Xc{ZhJl!^&1(U{HCUSU`7;rjwWmHT%?$H{wY2I)~ zX+*YI$#iF|`^=Dh+%P^j?-iq;uXW{d&)IB0(A{RY{liCQyYZUgUSh0W)ybXsz@QbO zsku#Otsk06u6vlkW9Gf~1K$*0&uXL1W^3x-iRUqJ4I@S4If*}D8ZsE*Jh1UT1XD5e z)Zox&Gariq1XD9ZvHu`ud}(K+KAa}_2r&>PxEwKyIX;M;o#N*W&oT^+NH@Kr0j)ic z0j9uI?r$2BpoJ#$hM)dW2;jdn8%!f5@fH7B+5ws3&YIual>FB2D>K`Q7z?Ni!UUFD zSXwP1gBCNxN5m&6SLZIWTw)NIf=N+dk80}?oTsx%-yj!2#5;)(9SMo}!j~JSA4A4_ 
zga=}Zh_8i=jBK@C+Zt|gg?A~i383X*CPx>nHADCp^PHs`;vQsXipYp{nRY%}!3h{# zP}cFP8P}NfeMV~RF)hLlIES4q*x@3(e!(akF&C{f0_wmAse=5nH4yiFl=MJMPJ%JZ zErsDy!JUiw&>FhXNrbOd;MG5WAoH>u+*XGU-|fp9HT{cKgBKM?ry_&7!qzqu$HxiY z8f+^W0F<1(2jO z?9*io=k*nYFITq?WR4!q!q1l3+{_&wK61$8n?q{!W)4&T zli$=uftg?$jxzN)YR{;lx#apsvr?{Z)G(Pfv4s+{>&OVT5ljRJ1?HUb>MM4xd)R}; z=!UkFE|s)e4j&X(boTJ`6Ur&(SS1$|-&`K_kjr-9>MNTzqqWp3XRvD>gVgJ!lSWUG z0tDEVN;W}uioF={tm9S;qBaT;4tNw2J3E2z?Z53Y)$YHnj;{seFD>VtLfsNBv<7d1j^IaWlIx;O1}+wjS+1A;>bsTn5Ha0L)9q{kYF zK5nK*_>S@^y{*>O=0_5fv0aA%1@vm2#^E=3 zD2^Rjal?E9=%AE&IHQVTk!B>q$rIJ_Jf;dqRAw9uA{`5EnlQX2kd*Pc6}K}rVhonqT`ly zOEg8#abAd-WJO_F7GmPe6APNXny^L@A+b0$%W|J}JT%=^dn9`N{%UJ8#nA%IzkOX~HmrbvYT_@yfly+z7OIGODYl11+masNg zxxjf9n8K5y?Is%p*1(sdCtO&eQH>_HwdymZ;g+eb^N2enD2=x0oz~=ay;MT&^g>^w z0x@0kSoFQc1KT~q)H`*wAIBzL4}&QjiW+cuGGQrHgb^J5=^BnBB4g}7)&eR8X9T*H z6Zl{FGz;!H=7~aFE(@#RKoN)eA#c19+nu)4NJW89xTH7H)>~^!!Z3MfPDQdL@QR{D zvx)NpOE*G5gtktFjtSxHFJ4-rEXHCKm1fwbQYg5S3sB{1z0NgPZZPBKTJVIRV00;S z7tS7Ku0w;)KZv$7s4#p&7jh`cOIFbI70t&;LzsSNT@ixDGuT=|Nt;y`K^w+g<(S!X z?i?N5f({d^flD4I>?BL(RGnstG<-OMaHhzstQN;Kby{<}@yyCjI22s5lDmu%fzs?X zM0onW^0I?LxA7AT{*@qyvIeok2Ub&ql~i-I%%nl&<9a;5p?JA5(U-Q;kfk*Bovtcx%pcfBSWkCQf9faJ}bey z4D@TPL2DHmje#>yeIc@QiD5gz_!o+vo$;Z|CuiK@hrUOr)yj&8nFUg3h|L;liZnn^ zDSZ85d6qC=ZjW|K!*0mJwA+_Mjzs3Qjz+7hQY>tPXh6MVa>tIgdMEy77hb$$$BuqC zz^nya=N1bnrNPQ{p3-gIo3a#Vsf3@p?nWf zzI*57EkL;+DEk*u^)~gYBz%b24)2`28`zEzTmK!uSj5V2puPT{sMIx+uR<$? zdS}$AWURHKHc^;=Ak40vlTSzS&kQOP=&E@4_%J~>ub=!P$nui|kf0d#kN74K+K&+A zz1L07pp+4S^e_Iovf6AEO%@zDe3@qpqmg6ZKLPXjgX3?#xbRYv(m(UC`wp3oDl!6K z5xuyAAmfHZ`_Cl!v1=y(7_59A5cj9v)W1Zl{jbC_yHga&iN3MqtRkU>oY-gQNc<>~ zpWQk6M(}hL$oube9nYC{xIaeRKXvWoAE93y+y>$C@$}=w^U%)8he6{@foB`-uA^VU zim>?|fu8{4@wbhC;^M;XKx@0L7h3g6d=LoWzmbr8w9Izu8gV)QcfvelrwHxCETw

@BR}-jDJ!_ zkoxvYh&G1pi#gbiI_$wNx|LRNo+Ogb<5JrwVpnOiScB(MqCH$q|BGa7jlW52g#M-1 z#;3g&@nbnQ@vny#OtD7{=mxAbRbo10P zL_I;{VgvS%OATnK?qjgJS%0;SA?X$k(LqfeTKQJ_tW{pZ z4ti{;Y2-E#=+zjY(*m(JW!wy?+whH8LkL4bqN559vND!gXiB9%R8Ioz>t3U7$EB^F zjK7P`!tD-^^LVJI;8k|vM{nJ+D04q{ zeAgP)%&4lLXW-lBma45(n+ykhA*1s$)60mALo6Ro?}6J#UUC-{c5^?pV?xya7+DfsY3O0IX4YGXUxb?gz9d0e zT13-=@0J6IJwnBGq65%}IB9Msf%_^f5mk+n15RMt4JVD#yh)OQ;sRb+GEhYcjU~1Y zln4=$c43>8a7{!!bdYfdQOU?B*w9o5(F!;$9Fd{eeUA9d5~3U(zonT3G>B-m6`Kl9 z2oHC$dlu4{2gEP{qw5Lo(0qK(!CC`;ceYuH)$SPK-Y+xg{b;`Nz=OOAy2dw2va)auc%%2{c%gjxm zoPTg+HlM-onc3+F#!lo>G{m*>4_8h*}2Tf)CoMDnx7rJ zzc4?I2ZJMXfE)xdd^IxlP$s`HGn=2A%S_K^#wKSb#()m^XGf;y$MSOrGGkMt6NM9F zQ>PAOfFd(BJ)fBvn;e_RXY08Hb2GE zPNKMx%*@E_{Mcw=Vq`WmQ<$BZo+C0=?!?&K=)}m_Wc~z>MwvpXnfwF!srk&@>5++v z)Vh-XNKNl2u8Fac`zP|cG}QIP*ld1uo|W>yM!`YQIDx3&nf&M&f8-bP;Ka!6LkEBr zSb%pf|MCJp!|Tk6k;#!$`MFHC2W!weqc|yXl5|g>1e&?R{d4nU^M(0*=G64`31RtM ze)fT}(fr(tGZWKngp--VT>b#cm>*%`Y+|qsukri-!rT~{2UKHI^ZD7?!p!{G^wbbq z`$4b=^&bH+TlGX{dP*7>%`-jw5Ydto(hLVO51!8BIhuP)*KK}wgiM)(l#H@<-G?X> zLNg!KG&7YyH8FN7KQ)>sMbpId;MiP#2$)7@$3WjHwio5qa(_>1rIy>CDfu`uQEP)-iSld zq(B=Oz$nI5N$kjD<_`pGZo2rF`%8^ldEUD7ee8?4XHkyW*I3PR4(dyMs+ZaX49A3^T7F^0fJdQ?nJ5;V9gwkN>VJa8U zfdKGk4Z){6g-C{ry^7whMapM*xPMAygWFfO-HCFoV?L>>mm){)(K!*w;xGp(RyU6F3Qsa4i6WIG>u z_tiD`na6W1BT?J#4cEWsF<|*GWLZcE+b2i;aYWx}?vuqKy<`-WA2I^VW=Y(}eTxa4 z852=qVNXyZnIMS5#uj-9N{aCmg)P>xloVUho>z#5hv1bV9N5}W$L;_a6273=ss;>QbHX9cmQbx=P81agpK4VUdm*c6IS$N>Ep{qt4p3PZ1WV z)#cg>cE-S>l`L8?%y6VD^a-U5o1!F9P$a>mS;mTnv+TP!s$Y-!h5>Jkg2`wFg9q`9 z!~0?tAwyrX&((g&X7-IXzscbnGR!=Rq3aw300&}}J)_$7q!TY_)ggLc3Nb@MOtm`^ z40=hQdQ6u3kHrFtT(v1UqYMTB1-=kbZ!%p#@Hi4SVB&|-X}GkqQHN8l91sz9RoPi( zR5+Kj%}T06o7snM#}X#=O+_UKcCd8O(K$+${W5}hZR zvoK2OjJ#5RuF?lPWF-+JvV?HMbUYS3>q^fIW1~7Yi}@-IWez%m$+@fue=e+H7I}<^ zll2@LO2OuE`4!?QWej0iLs(bv6iJz|t?O{kgMf2sT{p_K00U}_WX;1`&@>YqA-?WV zdrTM|dvDj5(Le+0sWQ8NnoQxJ2Iobzds<(E^9ep3ACAwDuU=e8`8d8L@(g}FIX-uB z;ps`ti3O`W`1PUjiHi%n5?`xl;$`54eipxdF35mmayNcw)5tOQMRjiq!s_1Es`y6e z&PnlUJR3;i`}ize+Uhy@3*X1B?z@deggGs#jFkNTJFYrDpjfUtKGIw;n71%(I5hB8 z$H!I2$5qG2RmaDt!SR93<15ug_^&xih0|;<6TsmdcY`oaJcs!M=eq=EQ5YbC|M-tpvMpn0a}bBM4P8iQh2A zL7oTh->tzl0sn+`K|8F%Kurfd2!>oq8Dzpm5r;0xgU;90IlW#0;8kRU6<4%y_ATOfkc0A&(tqXPoR8 z#*yBK#bYzS-ZJ3`G82Ijc(E@aX~4RzkSZ*lTMQWz($QJ8Q|cmV^yU>yWj2{ac-zPGn1tYNTCa-w>X3h zlTn^fFNEkY+R2fH;&^^`DnC&i%}-1eXGczq73N?cgX(aes{<6x?j>C}uBo3}KY-&B z&QpG|TvO!fS7~l5XIzikY|0%wnVBEUdQVC6N6G|wy^f^{G_1qq#oV}SP`QXtj(w)j z`C(X}RCg2w)ifxIIagc55n4^S^|Z4M=gfy4*h)Yw=LajZmCMZ1|A@AS7Y{uTk_Ztr z*^%LOs;qF;#ARe*Yq=V;LZ+@2gffUz1Pd(9YV=^z3?d#BIxD=X(t%Bnh@dfRY0S3; zA=Eo1#BXTA;(dP`zHo30R6j6Ra>!pBUnxS}T=ikrG?HP~0l&{#itT@k*s(^ah8Zz>@)WH7gsN zyjC)@kM!RW2^YUI6|u$|ag;UHR&<{4GegUeC&HT@^db%NeX!7KFP+Jsz2#T1#r8(BxS;tCu=HnUf+@0J+O1#gJae8)2uQ0t^KhtI;H5_@*& ztgG8nkxlBMEqNhy>{$RUVkN^?aSI&;no_%+!d?mA+TlYOYY$?{pplJ|8#sdlR7};^ z7pj2t0291UQDkt%TuiC#RE3buk(b)A1PLoOTBA&?T1<@*R~o(*bwA@dULf9qgP#LfXDt^j(gJ9MBh z2>AaZ_t?iPIN8*ifUI0!d^B1(UV$|(gjAJ&rB|_7^P>FO=bVldZHfn`+#jB zb~Qu><_=^=P-%-WT*|c5A{Ley3iJ*kmp4C9=KJ(3v9kAh!M+^LEr*oYIw#LHLZ4yQ zj*ujqivt}XwrEB)2}vJ3QJ0w&OnS3SZFb8Ab-}qQzBVZG)3>0&^#$lt;W2-^cGD&R zUWqR1Vk8jM6TjSATXUeOhE9rn2EVvzDa#^;g6(jn3*Ze-HSi-2jYS#g<)8{qw$AJe zN6)|t*cV;IwT)=5AyhcP^NZ z)u%fq)|d|TtT{tiW**j@l3R_+^TEuNWH)dfQ}XPY!HW5wWTvnoa|G{o{=4j*1D$I-!Xz0^3TOJS5m zX|obt@FXr!fI|SXXFzd;p=|%rSB~NgUJrrj!p;$dSE&Ki0!2jKU{S9bZB-l}th7co`*FnETjJ z%r8iE@WypcJWlmXL1b}`Dvw8sR+YW&fU&+0$f;>9cuQ_b{i0#2~)dwN*wZJSvUyco-Sg{+z@2qSS>&8K>+x1&re0mwxb8CT! 
zVD@W)W{qz@XvIR{^Kogb1Ne)DzRf!b=6Qp z%3rlgO%RWU@|ie>WOq?aKIfp zMWfJ;fi8UzT!olyb6RT_5F=sM=9#QH4h%5d*!t~gWJ#ha<`Frvh);&u+~9iDaXRbGOvBEq!`yPFAc^SJQ+XVuG3-L)urx*zFc**&#Upy*!Vu5I zMRKwSHxSXujo@JJ`EZ3V$zdPXjcL3MvE>`T2}(>|TQ96WB-5d#5C*bPqM2sEg+MD> zI06K75`gI}XnHJUJ}b+T^jyFs#$p=QZAv^d5zUO|7hv({YGkf5KsZrfVd`&(actSF zh0ryldB9~#k5`HVQisG)NGbtrRzrG0GbA^}AmxV+o6uQf8K~2frJv+Et7m0PnIk6s zlxZ3(AW}3xnBh1f82pQ*1RNoh!|7Eo*$Sm}t5>``Kj# zm7Zdj8j?YaN1*G&7RmG>occi~o4E?6ZD}82!QOPXPWfyp{Mg?nHEJYs`2;Q+pUlU5 z9WYnJf}QqA9M>mw`IlbK^eNm}bg!3d;<`eXAXk~#$&ntOwd?rL!^5Y=(dGp zD!^7>Iy>rHDofD32s0WTTlto8e2?sufL8$WD$GT9!JY>!VK?=*FDweuAt{mPbZ=vq z7xW(8+8wGRSP0_YYjT55s1K>dytc#x9s;IGh(qfFf;X@aQ4qynGT#~?Y_4Lhg2}S( z8c3QvR6jaOO{M6gQA!61fim$0aUUg%NfR0>5pYf3^P5q(dSvm4D+Y&Pa9^r?7D`c4 zsd)rw7PA7Z_y2|Luo}+_8B`v`p@s}^l6o!>x#Pia z$$Md`K^Hfphtd1?1)^9e@5Knem@306KFRWc^^*k_JU!DZWrQAWdn zi+D!%bRX4LTB<^nHp0ocS}BGlVv9BpU}Lvr*hbnX&x@0xXtDTq-yNs5BT;VWipN z@r{%mXZ$+}^dmpL@(f=lEoC=u=)UX0FnqBddNm13o_0VmUQtEHJ_*%h>`Q*lBfHbq zL^WntPkzdOQn*66;iJf=!@>;-i4O&@J8c^#w;p2&(#VapE{qCr0*qyNlFXWDQmgYb z4&h5BOa{1Cd^`Y(yhw-jXWa*I?qNEN9a_Vp>@x;FVmUp0B@fauAyRi^De|#?lK!UjIs;WnKZT#e90W}*D{b*&AOM5KPoNSp|Z8@JP zF_uZq?WjkEuZ`%`cpMZkC`>W~vIII{md|;*iFQ7g9?3(&Tg!#>Yp{T1 zc(p1tm}$j^bdl%SY|xts4i`Iyce$~rCf#jau@I9kOZPrAh|{Ir<;eLMlu4^qy6;ZRD0PMt*IRlcDe_eAc>vU^xHh6xvj)8Jfev}M7(Sq+y z7A~Y3dQgyt`V1>Qq2coNKpknzP$|7Mp>ww|M@q@Fgc)aG>yshg+CaCo&)rZH*CU^H zjUc_*HRW-cthqiVnuN4kRbrc7A7@|&RihQzEKJ*-nbj8|GuZ}JS|lq zN&A|3B|AH4#Xj?NT2HH~pPrLPD-Z0k#iMB}bFNAvU`~)UmI%v|NT$F~86KjrWSlq~ zgx|PJPC>Wf>MUv^lyx~qYZh--ku^CkwO0{rpFr-qjB-@45h{AgmLVcMPt=}GKccgE zKu{_fEjtj%q?y38r9 z`26%jkJh35*suH8iW@73`6GFf5!XGWB`?S0_Wwa&rAo4j+OjI5MA<+5N7>jnIV}b- zqC_1SNV`;7t(4Cd-FcwJ`m#Rs0E?$9+4RRT%{@vC()j70^B8oB2K)lp!pOd0chYuC zJI(zE)jHPuIR?mv*J??Q5yw4F_b#6k!Jto0cL0nNvdJ~Ws8lR;^&vMM$W3T!t++{u z_R(%hb@a$XY9g8Ea93MoayU2ykv~j#4z|x|3n8-`JcEnOaPb`c2s?Rr85|N@EB0E> zWs}+v3%`^n{1{WGMr)Hv_E4JHgX}7mVu-kRkyg%0>YnV7cpK11&uC(Ba*8b~2w=Bb zbRKo^K31I{h-#C>rx!yHgsTRtf`5a?$H76q%*}A9hv3R3KOKpD07C(_-$%CX&f1JH|hJ zap6h$R`)*L`NV)$N5YTq;>RDEd;&7~^J_m_hBp=6zoG69zkUI~KE)W}n@EPnt<=%* zn-}6{^5$zMpAD1@q?TUOp{J{RLdX{p@;TQ`-UUb|6#_9_PZ2_b*dcW+1bQ)U#)ro9 zI%!e*qNTbwgg8!!9~^)C#f3wxg!Ps6xdNzO65_g#xSHc{VdV2m@jKg<@DY50MCv`c za=-pAy+KC(%VgW0ke4R?(Y>hNWkoaRferD|Cvj=3Q~0~sY!z;8*2_f%P8Fe56w*zo z)AD)@f3wwH1n~bbM#^zVT~Yg&QQfL_@s{pH#0OdW{u4p)y>cU6GyeFoY$ByELPA)TdHK9xKELs-dVoI{ z?gJxpxr6u*C411a^q^IyVYcnmhdJbKeh`@C zkVyyRcAv(=cI8!)$t8_)}{bFA^^7+c2u9-R!VN3dbaws zv(=}ctv=&y^^UWx>NC%(242K*5*bgSSRYVLyavV=E^YOwATb!v%S{~Po>dl3#25I! z+}K1$j0-ues_4}9OdNv555-O7e^|l!G}QsMg%450D{*P7R}rdeig-+Jl0}dj7SYBv zPP<#EA5fo**FgMgT-xgM@Yf-J4Q~nqNN!oJV{*S&G=;8_$a z|E0LDns)m#{BEl+7YNgCUm-Wi;#m^GzY^C~(>h;;-);3KfiU1-&6`N;+0)7T=5_bN0Q%jWL!!P0Pth7*i%VO59sYuM@o&DKkb721p|sDcYm`;p0@7x# zv#4*74=r225qE9%P5A4Y<(uUu$rsDkZ^3)Z)^8Q?ZwtY{y#qK)VokpTSIgG##P7EH zE`cy?{chfbY?TAs?$m4@?kdEolHG4ZuqGt$dwAWw@S;Egrs7R~XyVw__W}vp?fY

(blSTM7NPGV4S_e~Q=La>~~BMamaz+xw@1 z4(z-Bev)`8gc_HMjnh@RcZ5Anik>pv2>w})_loC>#Rl|_&p@8EmeuSs*eH9u+bAT4CHw`iyBFvzuzk^)BY%l^ zXtBS-rLF!Nf5ETn0W=LsNpBj<R<7foh0@V8i4e5Z-9>qhL33sx&b~eH=P;q z3A|4j<7VbFpgxJWu$2D|m$v$MK}F8&S!GFu8}z|M>OXL^a1SbVaHG;ZSVexhgVjSv z4wkC*&2tAEs_f^)n4urkK<-&iHJa$qH<~ile*zgNCjZ6K3OCug|2T@n+yF|Yy0Z85ZY-B1o@ARNm^ zej}P)w^>|CS)soUpS9IaBDK?->*XfdW>)BLzpyB+UiO8ONozK;t(}T`3TQZ+p1)kHGtbO!Z_yQb$4lZq#!C!KiiQCh^2e(;=e!0Cz z&<<*}n%j5EO=oWJ#e2)`=L-1qLhyYZz*!P&oW<30dkDYVYQI1jZa<$l5pMGkzy!CQ zpQ>MmlTsFcfY;p%$oK9U=#W{S591^7JcmnL9mHSqyekOU&dT)5_CtdCu*R&}enf6M zv;8i-w`{*#z+Vu8AMF6nl34E-;%eD`4}Q1Riv+^3{TOc|Y^O~TXM4ZQCVedQ#k{85 zllS5kSbQ9pwt5NvlEt^8%$VT`#**A#jHS&OSImL+IBYGO`=q>=>hd(>N8~2Xcu;>o zURdQmDsU%4xO^(yo>dmX%Ae$W+hV71(^jVi#ISr!ZsO(cS!Gd#e;KZ}#m4cwttJG* zfKT!!Y_Y(49FC8gQ4bf1VF^>b?v@25t}0IB7ELmPOIy7hf6*k>7oY)HdU^v`w$2L7 zoQBbCotK-=)hporsxeC*5by^b_?}fFTorSKNny5QjuZ({CXYjk!hvo4wz7vpA!sb{NYhx#xM{~T3P#?A2KMQni^$Pq& zi)ro`CS+7bxEu~*7XvUrw6 z@K@n#8U7f4w^dsp4EX2rCc^Mk8?guPNf!%yHLnR9@$>K${C*8CZT0#1OI07W5!Gu6 zxo4FWrnf-SQ2YYH_d1PFx5ew_CdmxT%Zqq#dHDtbe>?>L!VcgpiPifeTrDr(h~I7X z#R6e?`6avwdAY#Tf8%3lPJ}VXg)KLn_FykbW%*yq>+S^_M9%p0*sSsLW%vs0{&HN} z>MQVc-=y(t_J6h9bY}mX@!qokYXtmjL-4Qb0M3$F{jbN> zvi}?KyRE)aAPoDzi8m4U(@IWff4`h3l`Q(3c}+KDz6Gzq?r+7Vt-cL^$?jWGa?FrX z-;Q_SF)8oG;~v(`eOB>*2XM93cj7M!)ot)yauaU@RN=e%!s?;#5y0;a0lzO5aL+2s zAPc@9SKD}R!SA;E0f8{({-E5%%iXiek_i4oxZ1}1Vf=2Z9}x%x{-eAJ8!xbC?(44j zNeoN)FT$xJ56%^VT7{{HyKds{arnHOZr&oFY&s2fd_4knN0nRjnl}~FXJsZ`75}z)vw|& zyFiC;e~p;-tdjCxJodc+@f)(S&W57=x|H-AQqr!o>Noi#3ojM|YS$|2w{W}g;!sJZ zMR24qLS?o2I3z1h`uC+Qob8xus^2D6t3u2&h>;UhM(aFB12HIYsh`BAhrBt+ebj{Y zG5ttZj=5z7y;Jgi(?3*wbAe_5jxT!)Wy`4#vLe6Uka!-Xn$Kjnk3QFTC3JY@P_-rG z{H`ab3UWrVMXI*Zs<#@mNTOR=nz3iSG`HrE`m-{Lb3WEU3Z^ae`5 zf8<2*fsu(q{*pMb4cguAo}udNgpl9&gj@h2%u|jNw`WIq+%pcIzJ&I;G-_TW)ck>` z=5?TEetLqW) z!M{;T`4eBt!WFAjK%=eGcS&h~>PsUFk&N$Lu~IHyqBvU~rW99~;@;(p<9Luak@}I* zv92tdD>pbcr1W?D(!ZLe&&`ibLh%(xr}HOIj*X5%_stbg&Q4DjC&s4oBeTWH=@U4a z>g2@8sY_DQ+o)bJWPG{M`e&Y2Djj_)R|S!bI06pZXQjo{7%oxL`{%k9mUW@&&pl1A z2Tj-wkHFt1({gy@M$#N@tX!&+-UiDoZQmob{e`D(3ACL;wnXlhn5n2TY%Z?*(d?Q; zT>r?%lI*YI@z&zK_4PdY}IJfr0CQ4QW>}|6ggSj|sMq6Iz?{|`S><&=tGv0<`!&%Y6S;r{)arjWwWwbTr4#z z#gdGsK@ZX8tq#m{!ZzZjPD--2(~ z`I6c|Wx1CR2U@95;i(PN()mnqT4`h8LH!l>g7VZWwF5H?EYMwpOIuxwzuCfdsI|Hd zKXH)c0Bo)GO;|GnYA2o)p8s$*dm?{-;nZVeQzxe%dvIiSYHaG%WBJ+H>DkBT@(<)^ z^Fs&E92&m+?88IqdO(ctc>S!pfuJ{$zGX}s)s48FYAqJ7t86@~Zo*CV+QKbrx$L>| z1On`~+-Tx>$wsbLX)X&ls?GJagKB`EJdFWXuuWTFvF*dxjge6}1cZt-{S{dH`a-#)YQ36_475 z+a#QeX2U_S3ym`EEYY_(eW(`}PYb$UR RRMazxP`1K7iwGBU{}(tk(`Enw literal 0 HcmV?d00001 diff --git a/doc/build/doctrees/improver.doctree b/doc/build/doctrees/improver.doctree new file mode 100644 index 0000000000000000000000000000000000000000..39ba32decf72cce06f0899634873b4f4182c2818 GIT binary patch literal 24516 zcmds92YejG)i*AZe74-^*oLD?Cu2#bgDHwJC14O>yl{ai=hfXxd+XEP&D%Z80+v7s zL)Z~?|lFFW_PvMWK1aeeU^Vaw=-{EpY~>U&Yzmi zm9iBtUvPVhrJUs^_;ti8dI_~n*9B^-ucoDpnMHHN^2W-RtET&EMk+mP%9JT?Ww2~! 
z*O|k}ZRt<1L{@&JOr`Y}^%Mu~QYqJSl)^go^ak%HC73s{X zV^;Scb7E%YF(;mwS-s-uv($UR-#gE@6u{8T{`3y@SdzL}CHm6` z)R#`qE+k{l)vWRKaa&t=*zz*Nl+R^0WX3Y-FY>7;+ zt#<4;7MK;UG*l{i#gb=bT+bv=?X+qeqm}(FpL5mDzS<>aw-~eH&bH8Q#a|o#W*P|; zgf?GfCyX6J7wpz9*d17Wu{{fav+>uCzd6QSo!=elaaVgT{^l7wM+hQIggADy=Nt2) z;!rKgiqV#}^MxElRl5(UJ!%`2Yh_v9gxVABX)j;xjrK(5XrPo1V~WuN?Lkkf)1GRy z1o^4-AuvL3(eirj(umb-FWaxzv`gi6y)I>YLoRyJT6=YkD6fZHPjwlso;l2FR7;H3 zfCaUWkuW`v@`Du*uD8@z-6^9jS1B``t9k~E)@;FaUHtAHFxm#ux`|D^Y`~bt*4TGI z?Pn)VV}^#`e?T2z&o?W^^g_O9sRLI{1+t*`PzTvlZD`}*RINJTJq0VF4uLWb_0{r} z-O<|A&Gow8GJ2#-LRDXJL3}cT~bghxWf|bddh5TU9OATGDhWpcV;&9xPyUM;; z)WI_ixn8J2_2H|)iN>4G}nUTkq&y1-6| zZ1Y0d-lW^og}!<-3pdHgei2wZH!kSKRSX1zMu^`6K_|gB4xu;t>Jk>3F<+=#Sgo(; zi#cmULR|_^zsy%}4HW@C(rci_Eq3DYwy5{+4W>@zvE}GXU3hD8I9VD>*6QkV?iPix zdK)*Ox2Lr0*ZcZ_dPfSw3h)#xwJE(*+?6%4WJ(*PYZBuaQ~EviPBF_ifmtepwS5i7 zF>@GQg`H6Ef-oYR^|H|xlAaudOuv|#S3&5JnCv+UBe#5!d)RAWwYyC>#GmMT&Le&VvA)z zIEiIHl(ILa4~BXtn`tLgGO8!@#ab`wvfIS2A7;DuPnfmV!LcsLCe(Fc@_Jw0P|c(< zolDh?tht*~n7Wm;SKhqQXvvjEU~F|udMa2fkJWi-Y0%+Sg}OC80|3IMx($;EAe8*u z?b~9EsXJ12!wiFF+&pGFUWG8NxD!0y<*Sdd+pXAwUQ%S{@)b9eFJ{(_EKOF5qwJBM zmCK+xSVtUd7F??fHo6||SOM3|(T*owbEuEPJLgo3s6NJu{y6)D`h?wP8ZEl*s=I3& zBUxpt6!Tfgl2D(N#Y1f;@+o-Zr+xLAK_1ohtP0)lXBlnVP^D0i`QzsFqAe&=-DAwq z#=94@*%?E5o`HUDW88F~Z(=&Lh^fD<2NLQFu-$#Wx}R;=68b|D2Dl3yI;&!?sA8gu zdVqEDV1wow`m%Zmt#tyIdKdyf;;TnP7rw7?hpy>hDua1(LOlk)9{1G~%vXG66s~e( z9L|o{R9|HFzBCzoUj}=PP^zzh(XaaIYayeLPr|6?^6TL8Nnd>fHSBw0EX@>_#FF1+ zex90)pQpjk7Onj)@b+zAeJAAY$w_$Qt`(sJiwb!c^YHJ2$!C1^JyzwOF!2j$fiY1Q zh$vMk)%RJZA513G51Y7NgeReX1h$^_)sLC2*)?o2{-4z2@3f!c_03PAu-Y2xXHd}3 zef5h_LC-=#QA>`2);xeKB4r2fC)ju^c;Qki|5tQ%%V6?5zTcg~Asec+`64fK6EtO;DO0*Gply)&G!@T~bDAbE(vQ+~{5IM{_PbGa9nC;qd;!&I{{xh$ z1=+A6uWruLYxx0cMczdhP#Y4TX5tyx!eO%7omw$2Ci{bbez)OG^rZ9|@Wb zP>9ry#HTrUhL*?BCq1w3*8+H&3kY}t%@fGF)p*$X(tIh$Opq4fmrvWuyPm&e!eY70 zlgnk?SiTlB0fb4`+MNzF@oEjYv>g*)V)HbRwij;eq+1ov1Zf8W=u5Z4aDh0qBl2Lk zosjsnvjB9&0U#jj0xchYPrC@%!T^lJdo}u?MJNmE=v9rwW@YvkbFHYQOiZ98Yt*D7 z?FxvE(pa?j({9L+(NJqU){cS+8YLOIUP3o%OhGH8-5GMN(S~)3n2`41jC(Md9$T*4 z812dASg4w52knIdbP=l(?EQP=rM9VI`frh0NKgj~p{GtHKBe%4V>+S}XrN2pb#q(7 zzm4hKrO-b3?XV=$q{?I6n!%;Wv0~wiPKVXcEOc|K=LuRf)Po$Du@{L?%kT`FA9JFc zUu(c5?W?n-o!RW{`$?_+b*=OfP{4|ExiFSAWyupp!s9C*9THA@CBQ^sM=O`i0%;E& z08FoT!#I$62pYygBH1Jj<6s8vdkG{z1c+dULy`EjT*!7ffjU^rumoBX>-;bQIb4IJ z&x6j-u&rVeQL928imcNtmYaj-Al8|PW@kmuOOtsoX=Xh)S;()ml4hb%8tpD48k$5v z2kw|0wewj!iTG3n`v6HSbFidFyqlL*o^7!?HAY#v(h*=KGMW=t-;u&(z51pF>o4l# zs(t?pLc9i?K>sU{_;i%;-Qfh1vBoF#zfwSs)*$J7p#L?{N#NI``9cBuv0Oyo&<=PS zksK}2y5wlyvsrQ6_aj6uy3}fhMiU#79BE6Iv7q#lF5>MR959QyL9mIZ=b8&*2^~iJ)hGq%sovfS&1dBMs%8h ztkEF6SrN>i!srnhQY1rqw8D?V-~)8J6mLqO44XCHkWHp)hVMy37RhLTsmpHV*b+L`tI}LJrt)k@)0EbcY+04MWEKlptLUrs&T@F z*{DD?WkXL@x%=242*+Xw+wmLDec7wx+|AEP(AmtFj)>Da!f(?)?OZ9|lwJ)Pnqy3L zZDr4fFqU1)b=k9}Tk}DrLAQ1uvVD4^a1ig-oPdAn+Rm5aNxL@o5KWLS05tkFPU8LA zn*<5>YZu~|Pj8m@4w-dN+OJKb5?*g0;_a{$|BFs=5%U%Fau&Cz9yrM|m+E3$QWcR7Cf^fr0#NbfSKYr+C-Q*?QO zz|!TtUFtS%)btLn(zgZFvtEq zO79V<=8U};m!W7af9FpBbRpL zc%}W)g(@9(IYcWRmX39FBT{Dy9+3DZBtG4Yr&bL3yG35aN^~oJJ2UX8ibgst6=S-M z)0%+f?Z|`1??B?yop^>CXHueZClURcrGLpd6OLVXA~C2rC(U99b~WGI#hE6$;kEDnRENe|u4C_5eI z5N$5#sU-&ZNx@#n%tQ?EQ(UL-zoPzWpnzsSgT$xL3c(I1V2NcEKZYywNf92iHw)lB z8nDrn|6VC-Iy`<(iZ`YE(5Qo5_ZK}D&XTe4^T60(Ec^npeY#J`#m7Q!b}|m$FGaC& zkRCuz&0YfL`LV(9K?bPMB*8HEIA_jkKFmFVa=6hKk@)l_JgbMfF9+2(r}uh+I-##{mHIL5 ztH{=4+Sl;Qr?1Pqj&cVQpfRx>(3k>IWB_|o0P7l4G-v+?SJC10P!r!o7KC~ViBC^U zeeFq1T6gd~T765(zpcxoQKF6u8Lhq}P|dmcF3M%JdPb0buNEn~((gA!(djT7+<*Rn z)7WVBL%c(ZA0hGSSv0u~dxdcbwLYR=-Cc zH2w!9KK&8TP~%MMg^X5zlA3>x)zm#h;Ly=%^%sF??oEG{a*j~{O)&mlW0ZKyX_hB} 
zNVjZd^N4FGW zyPgR1!$jtBs=Mi+7R^T%w7dWbc6sp(n;R37<`yc6*`Vztw=tiCrQBG8wg&|G+yRMC zJL0Jkg1epMr8##yqgzr z$7<42WJ7n|NPOzSGxSd;C{jAwC!N6YdIfS>1PRjn%1gZxPWfEG!T90RAp#yW{X_XB+U_@|VWXOsBTo*!1S3e64nxj(Y|B3) zAKUA9CMMZUPR#H)%~CK=hjXguGAsrifp@6#NF+Y>;mK~u@c}7)4MTT2%)eG^uoC2< zKxafAodh%oubu=g5~$`t(6}v}+MIzbh4oAZ~}_VBy<|A3QrM`Q#FVTP@1FknXt{)EoCBw^)x+% z9)ZwmP=s~)q%xf*)SE6m*GTcE^g?h{Z=H!l==^zFQzOcP(*P#1U3eBJI0bMz@_jl( z=yaqzf#;;UW9Jd;yqhd1LvYZX_y9~~#g|)WuVZ4t=Nw?%OJ1UXLp9 zngJx(?-BwXP9PzxXu@kU0MIn7n`!qUK4(=*yzxCr1~zq zUF?`e^#*~19I}0~1h-!9X42SZX63mnwP459<&Gz?&b3^<903~a%{A%i@A`Q*rZ zNBT&3O|>Ygi0`Gzp~~53k7b5%PNYOjlY*Z^EdLuT{ zIIUx%LD--m(jaUw!kO(lY_I_A)HggT0tD_-LV_JEJgXxID!{N=zJjV&4G|aO)OXD` zpNs62&HHB_e)&|9_v&Na4(lBuf`Dz=KU*(wb*&+qHji?Z_=#@XfE+MChQz0{rMh+* zCafJD=eOw`DL+@2r?+jWh#qtR)jaV5Zt|s4bDqvaIe2Ps37C~p>s=GBJ&D8?boSSP<4sQ5>ctad?EdapQ z2ax#mLBXckVkRQCIQYz4DIXHB568fCi;03$*KyGc2u@uOIGO9-AQ*3qFhZuA1jkDk zoLX3a>O1m{;NZ8kIG*>ifb2&%BMswIx8S!=w=!4qMKD;rO#dc!mziq(tdk*iHGU)cjbirXF&F-ZY9&eOw@F-6Q(ei#{Rc9G|*d zFn&^Fln(*Xe>6CvO`ie?6vZ68;zQi0nP@O``i!tzC)z|<>9Yb*eLj0L@}T&8koa`3 z0Mwq(W<|i;c^&pS0sDLa*64ip7f=ReaGjd-+4pg+Xu&pdKHJ$9VW*AGZg@_97W;n2 zDKoDo%QRSNW9A*cFSHEWF>|8Y*Mj#WQ_9BBWGNU%qNXXy6KgLeCUK?T2L zRVd5pFAMNjG_XuG!_(AJ8%}yu!M0wO_}DlObn4^29O~(zucG3s<*i?1+JU!zT?ClK zTc2d$zO5JV8>k1JeiMmLPYIO{Cy<(TC(0B}J}n^M(je&!Mue4SAzLY!o+Sb0T79lJ z3l%Flh+ZIvGrKsN&fmY7#K$cW2ypFu8PnmSzTCnjzGI*oO%Ht=v?Ap@admu0xTsf0 zbO`#pT($2N;qn=ff(E~b1baKeScem^&N`dW;12}khZ;n_0$yq2%qnh1U>!n7Bb9=e zFXKq6mq#SuwNwSCRdHIJ4?5#?s~Mc={}C`W*2}YkyIwER>Ea)ARs99>i=+J$Ab_+# zMdH)Xgj|OcFvRj{X?dgh=K}Hz4I+y}-RDP(!(U2K)5YO)QoKp7+6nhrOa`G312) z!o-7B;a`PutP20enR6#y75*I{@RNTa@#&vG6=2vLP8IUmIxWz@8R%ZUnyEeq z`X7|&P2}hC3tKbOWH6Auh>1;PRyS6Nfe5nx*+u~C8c4K4oQiU^58l%VH<70y3zAGn zf{hueuN{U-Yj@{`Pm7ed>hky|ay1vSoM{uN=3LA~xojdQ1nI0=q!?glH$>6tFdIC2 zwsRWWM4p3pNHG@)zP!UT96Ol=q|j>^4hQl)eZGJ#h=FM-qIvqZT=W9w>DvKL=0e*G z#vLMz5Mf8b@ygEA*`;`%&H}QUc0wA?(|5*ipLSuc!g=~ac@aa=A_lFWr?X&;X)&iY z^YmSj2c_?Z1e-2+hDv8rqVz4y(|4Ded&Fw$&LHsVXr8{OKs0x#y`-Gy>3a*t4vkSx zd#)@M@wtnKQ%?A}MSm?L-#f|bGngCIjO7y@3v~h$^vR6AYJF6Si3by;F8l8!-v@|b+@(l->K3vcPM{9fvUCm3HgGtoy+@#WHIxj7T4QKX&UEgk&>oga zX&viP-?T5v!>zLY1mFGm>0qg%ulqSA{M24{aSxG_ zT3$LGrpw3<O<#nH@2j6w#Iyx#9hZA%Rpx^+W z*GisDgPg;lo$=^H8}6uBN+xzZr?LM01iVAltC09~BA%hDnUJX3m{ZLPog^hd0wPnA zDeA{NP@Ig!r&I6@Q83M5345v(u8s=9?`ixJtxTQez^3_*Zyd^vS5=0fHK-TgN1T{W zHCdp>#Wzs7sj97!vW=@E-2|NuFev*BBtE6_gh&n(W<|eF-s?S!Wn>kf$*GQgR{Id0 zg%W7$^+sG#pv~pww+q8N?9^a{zZCYD+ z$ZHFj~hsv5PFDZCWaz#NJglXGi$w9NFO8J=~lv3M$^5E#VjTDdfFF zP{lU+@V%!?itBee%v^j+mZVCIn_x>eqTzCdzJ@406v3DkBtESdL>*2*hZ%y_;2^we z8WoTY8bk&P&4(Os+Pcl446Y{1dznnreOMX;85m8r>dqFDO;=;*Nb#ohd^laW2{f_i z%CW7QJy%(Lor_vEJFVIYOrBvGvpig5>f$^#PW6xv*nXouy3Z5e;=6PnGVwtSo<Mw&Q;q(J~W5#K%P&VxMUvg%EI@_SudlnXmIJBQte$_*ydWE zhh1uyuE1;TTo;Z2^6js=#OWNNL9h94l$Yo|c>45ShBX$1`pobxo_za=OIM-|AHF_V)`iM7B1 zFW>vpOII<-OeijE$!(!m<9!^r!CGY&6_+`3^nE?q0R?YIUq z#^VPV;S7}17=4g`w-<0vZUvXhq9Q&G{t)L)wTkp%{xK7$BP|#K*A8EYcNRd5lqp#2 ztpZ(-JX}gT8=_)Ppg;De8w4<6n*}cbyOF_W80`Un`UX_GiQ(+S`;A5NU2z7Yl;qkP z{c7WLf&zZt3@~FJ3-6*L;sE6WltH&}_Kci0SQ(~U8Iv)Oqh{MhO zai|mke?$Y%;rl`O3LfCa2~zr~z;4fGMkVTDeDX&eC`9;M^y!>bX~d=Wu(7Ir8TucW#Zv z=<~=nW>mbP?nCGc{5uh#;#PE@#*{C{jZF75*i5O?jqB^^0e+vG$>m*BZXnf4d{FAn zL{Ed;6Nd5|=phE<7H1A-M)Nr=;~qxNsy1UzwUg2#3^->+|7pi(();%u(7o@W2OUC> za%O9A9Wy0Yy5LjwH`Cojq&RMaio!8_TZH=6N-D1D^Jbm=o|cF4q8Rr z0ltaMjmARV0ayh~Iw&aEvopa7A$kfRXim@)ySFqGm!3xEfU$$LnLKWfCB9AqEtzeY zqi>;f98+U+G|KumvUsQvT}IvecNlkN(Ad6qK-1&Pce&COxq_LV!Ec|whiA%Ohzo>w zm06B+45n+f=V|qblA!Mc81s@JNX;KgO&$JPfl7$3t>$Ej(Gpw|^dms zEBqEDTcTvobtIU!7+kmO6C=Cn^M6TuuZpn%z01TM>3YKRYv4w(nt51)2?*5t+X>37 
p)8N`~+;*+CYRn7=aF>1u&;j~Ao^l5oE+3{p-~}Hx;8_{$`9GgZ9peB1 literal 0 HcmV?d00001 diff --git a/doc/build/doctrees/improver.ensemble_calibration.doctree b/doc/build/doctrees/improver.ensemble_calibration.doctree new file mode 100644 index 0000000000000000000000000000000000000000..9ff6df3596488e5c7248a673a7c6eeac7072d81f GIT binary patch literal 155713 zcmeEv2Y4LC`Mzn!h~7Inx-8i;y_jl@Z4B5LTY{q)ahA@$J7h^WCt(a((}U?H^xiR@ z6k4dE*HA(TC3FbACe%RqzwbLcdwaLLvSk|r{Qi3M?A**(=bP`F-JSV%_mFv8T02_0 zyYg+NhW3uuY^jFT$=UXG8A0yJ1+VD?b@sAKHMo?d%q0#cosmi;o5vc)Awrzgz}IX2Q8 zdf^n?4cWeVw;?+>4J$4+Jw0@`#FaarvV&M$dUSg8*<$4wEHip`gKpNlUR-u|N4sG} zwmplPHajM3THvgC%$LJW$K+8M=OOK_yJbv1)83LTE;l`W%xsyV7gI;WuyKteMhm0sLKRw+wdm7EUT^lMV znC2-a)7hCd#TBNf_nJM~LpnR#rdG_NFm~KDy5jV-H+v#_(Y-Fyt+QHdbNqz5(j1@S zLT82soaI}xrBZR_>FHs!*EMIT@`&T$OgT zH@9XdmWpe7#kHCi!@!lYIA@khI0=@DYmYCkQ(JwRT$)zevB^#wP$K(W51)S1h-AJC9#ZS}9Tv!xaoRaYA|#Q|V2&?`37=0KH; z$F3<30@iw7aeW^vN2(3H;)bqH_(ooFW5=cH;=Yt^VP|WKn*eW9ueh0`V|{b4IM~(a zZR+dyVGH@~p!RInpj^k~?4aCw8w|?iIy(0oR5C4t+)cnB2pVK5g0}c815@fMZrQY8 zS7s9L0E$~RE$Ay%+`6eI)75426A-hd;x=A!+uEjnt=*l(D;0-~Z(6XWEmJDt_jco( z`c1$!is6_I9p5xxzO_``etdCQ)7nm;!fj>J4gHg{?nGNDUO2uMtj9wV832mUyaGt z=Qhu6k=ru2Rnr3ERNOU(j*qR)4H=)?ZhUU&_}up6b0fxg=b%^)>g1rrxN2W)%pQQ- zwZ>iuZaVVN;%;7Xcc`1A5~I5(WV$Eh=mlx7xQFusU$@-WRBF!(rHXqs)r9WV_Rc`d z;`rQx8LGN~Q+1yl4b$uu_oZQYe>bjtf0u#awC#OP-JVb(!S8m!t0mW1&7l?D$a=+z z>;}0ts~haers~Tiub7MNOTI!!pK~iZ*R>WP*a=&Z#uv$PIZF{rPJ5Jt7bYqVPPVemj<~Z2oGUw zV|iQ~I;RfCv{1tI)IKTIF)`%^dA6M7K*qbeR6K~G_3K)OIfC6r{Rm8TylTKg$@rpI zi>Vkw+p@)Jvv~nl?nbKC`d9zCMt2ntmXproJ8AcX>iLNmLX&U}o~tPyf&o0# zD;`Gg-3J5Ln9G;^Tdow&z?gzdsZ3j2swlKd+h34qoRyq0i@jdwW3AMRtT4m^I*`l_8sy5&59A!>ouYYa{ z`Rs}G+0lW^=6K5&-jm{8!NY4MD7KcGy+O&c%1if7^q16&0Y7CU&>A z>FRezdXmw)VvoZ=j`r7c)b-C@cOR6Vx2bFa80B{i9dB(M9TY@mCDe&G!E$ z^#5kBcnc_USBCUoD8MP+O6C95lk&Gg`I!#Z?a=KGuXv}^?dBeIXO0K6FFcFD30OxD@YUH}37J8~4ReFaevI;{8zM0k8PrOca5jhoXY& zb9eI=;|e7ACI?QM|<1Un&Mxe<`Z7=Nyh_DiStd< z{Dc;tq8*;DZJIxOK&NS%Ki4(6t@zif`8gay&qS=!nQh0$)THJEaF1D2{2PQl>lOcQ zg@N~TI2i!KIlQUg{`u?_&Kt$&oBE;=*NN#>XBK9~7qC!3T%JUl<|}oz7GJaqHZ6>H z=-q}5o35kFmxM&`%9u&nuBKtlb`~qm(KT(;0^pc$&0X0IuX=*R_K)gGG2hO+;+o=1 z=>N-J@s*(eulhq@e649w*ZqOO-LLq1CUn>k-+(*t6u6gNAsAnL6Ati}SNx|N zyqB#5^tJMeZ`C-35s6O`>&fzyyn_lAzt<05a~)5+YpE66#_&A?9nx z>9)UrYT&?>{mJ09x1aW=G~bSm21RtHt)-jMt69R`i{kE;JFpEkL%R(ZgWD`saL-C$ zs&)+)YxKt~&?@Q>c=7~npVtsWzldbAsZw`mXNTzm_e@C?f&Qfi4$@NbW+>2N9H*rT zkDU3iagVLVwq~q#bhJ;ZG0Ol8)>;;YXO<&L&sQiAyu7fqq@lB!yHhtS05Cl`rQy6& z;2={202&`XJ_z3wNHO=Sr0!vvp$ZFi*Mn^^!ywO1d`bRP@rxo z)Q#d$X9U*#>BpPd*mh?Vb!SuCo%GMD2b)1@KO&JS#Zz>}tjtN*Yz`a&c`J%DoEfYJ zEV<*tjMrvZ1~OZq%`;o#=#M@Zm)&BJ*-Gt6>z!t6fv2VS#^B>pt}=UTZmt9M<2sPp z2I8ViL1tStduE7q(OJrMflEPVJGGyBbj~gQn4xH?y1&R(F8i46iE>tfMd;SU3==P` z>X_m9<(Uzx*QKwB?Z0x^1|UZXDQG3;hm~-fkZooj-d209YqH4bI#3CC>kde*i{!8D zy5k3=CTFl$X>79HR<_k=q13yQHgmDF*3ps|EjVFGHQNHRX5Ne7lm!{$2Ce}JDHZvS z)^*2YIjKN<;X?IpDE)I=QL6Savjh9GRgNo1W~4@A`|)N+HulTpF3!#Q+K|}^%rN!s zjKVXcaI9D)GNXN2Gtx`OIf2=Qa4ZMqD%OF_7&O=Bu-IdE#V^l{Rei23`4s4zxh)F5 zX}Hv5#)*CPDJHnPGL0me3kyD`35;N^-B5UDcS*Guj_f9;D9k!o;W0e*E=gDJ# zmX)hoW(wLcCExYvln0$u&TTi+}EuZgF{-l&?Y3b-@689u+rMBiL`se&o3Y%_R;leI0 zGKr^R^~=xC8KApV?9_HlNg+XSatC*|xE>Z<9aLU8Xpt*`*((Qo6=N?!QWL=>Rl*H( z##FR()vWsfWRqvi)SB|+0=VA=K;p|6iBO@z9D;shtZCC<$_2@x?2BD7GlywdBW@5Z znwi6i-1vWP*pEQB5qn3X@XT*yq`HFd3k(Q*ZpKQSIZ7zgEeccZd|%;&$YnTlv_KIP zyOlU|jNn*zGslY6aRDn3A0JCxU!YXZ8z&H6m={OTpNPUUC$R}@DwXSH)Z}DAsx_%E zP%fFB!ZNbZ_FJ^V6sMx_%xO5fX^7%94HatitFSN%sBt<9&zyl{Fb$rmO0wj$0Iyz~ zn}A)-Y?od0G-m?=tj|H=nRCUBCn$94JXNBssRcWAKH=58xsck7g{jFk<^sUM`9c() 
zxroSa?!H)+C}%1`&X=$}1hkjphi5JmdR=-OcX^ho9chq`!da2>Qdo!&8npN~loxTF zc_w5rNd@<&ty*onT~K0&yPu_saS%u^z%OE1G1i&-`y+oy$-6mskbng5k#V|V#3>-5VYcCq#f zetYIs3P-GwEsgh@>QR57-Zb0mEUnv1oi|X2nchUSM9}#Im(P^{FaR zBf6majEs5e`j=?`ZE4fHqbuRny;QB;eW(g2T*LeO)*ZN-ue`vXl1I8F&S!4@54%ZI z@?ANd(mqeniXGt4enGekw=Yrg%vU0*OD|HT89n7|!IHv^JtNa^ST;r&nE#+11N$us z&wPiY^BW3|53m1PlJ7;*2WLG;LRo%z&4Xso%o_lq=X|WpEWAQnJ@W&<*8%3by8wt? zfc3?1&n!ss2r#mx+4`v-^$luHgDu3eVlSr#Eil!>C_J+Wj;`Mn&~u0_Dw@SYG`_7u zA+|W6V84V=myAQ*x+278$NkJwR>;y4vWyjyo(p`J1uKN8?NwrkE+?_eTd_7oS5PH7 z3@$`hBx8o?N}^ra(xx*oMd-e@daXTl`s3`#Tj&tBf2~;+o7(FQhrNPzl?46R6}ri) zgu7r}4HeJ)N+fmZm3!8aPM8vEQtYzJl(9O?#+mUmYXA)nv?dDAtc9cVPpTCk%4=Ja zbwsjmh=ij3P_7lQe*lEqb*#)Rl!G&6J+ONn$gb}LK(Xx#-hNk5sv6m5^f_o)fcFkI_uw{6k^A&NgQ{x9PQZcu1d7)ZtQsL z8;so^qTkcfrxy;!F2%G?2JBu5+$fm+-kW7(HKiGkW^{NT6rO3u(TyPm#*N{=!psDi z5IsSaXh)pr7Qi)zt)k6Z+Vn!`L~z^9E=)||Ly_#lB$m9<&*2BQ5`^xr)D&2m+2mOv zvDcF)wWk{Uv23hV-?X6-cASjDGwnFiaVq4Q4${>ZsJ!(TuB-?5wGp?mNeV7cp;L^D zmNEQC55Wa_3P$ZZ;{_`)<>#_YN#JSe39vZUg5uuXGB>joYLuL3y z+dviB3RBb`&Q>@8Ei=hhm`aqh3M|6e3I~c8vlR})FYHRFUZ-ckldRyD{r%m~t*X)# zy6_U6QfEggyH%s<&N7*R-ND-Z1F-DRFPR|D5sb$k`6&g{WHO`_9L&a=xs+0H2$&&O z4n^Ua!*HxjDLCAhH6y)zvcH-mh;R?j#n_jj%WC8%T)- z)5X4e>MJJ}98HqBkyvmH7{O-8qVUXdlB(z_u$!1ia9PR3g5w20!NLo&QGKCOZzUF- zD2!yiPXZiz_cQ<}0|gqLg2FSu6+f#Hb+8&Sz2H>APqXm!K#XXwVhkZxslN98PZzsn z_0JHT8~2l#*VR6!nL8Zz$joQo*G+CP~0f#nGVsjNBP~>V9p1DSBtRfT}D8h3ZxBIUZ+I1l` zJJi8;|MdjT!gl`+z|)1n@5J%<0Y`M^590At-R@_Ea=U=0pu_!gVvh%?#Z3l=r<-awIS7r$FNPd)ZNyF=c?PeOc;1Jo@mKW1hCkh$D76)%)L+m zMrY@KS?BKi*du@U{eD?FGAiZW_XmjF_`e&42O%Fm`49@vJS=VN3jWa0QQ(%CD7X#& zh)^E2D7^Ufhc4I#e@viAUxRJ%#|7s-^k2m4iGUU2>d9E*`U0ggo}MDy-9tZ(if8^R zlDKW~X9TI%q`pA8WcD|fk!|p2(F#-i9ffC}!_kE&#VI_iw!xnl<_iHPy78ha$&&v8 zc;RfU3&095iTvd-`6~(Jltjf}CERVby@rZsUKa@`c!+;Pm8f`%BK?~zIS0IjAD;QA z(CgBJLl*?0J#B3Y#KgYc&qkQ%GM$KI3VvIhqJpU-1;0bMTT6Tw70$WR(f@#YrX(U_-^+A)MvVZ?Rxtqklj+@SNM&s5ggII0@-o^zEM3o092m# z`VY(M`{i%Z2!nlx!ZZKH(HV>~tih7@tG^dbpZUr(zQsc8h4TOk_Vc2^9!VVP8gRUs zJD8c@GFm{4`dUWmS)^SM=$Nalc9GRT7fb!bdLhf&#!`(cQH#1*T9}L(ON)qhQA?}$ zq-a6DVrz}(MV(RI?it>#*@d@;@=>fdJT-+CdM1(tn~Vyo#S%22vK-UJ33oxY1S+0c zQY3zY2)WTWmQrIq+e%c8(@TsS)Kps4t8THcbZAd(eBBoyL@ z(@Fxa8~~x8i-vi{R+RaRYCEHC9 ztdSE_To}Rw)WOB5GF5|!Qy?PT%9o}MH6R+EX)DbFReJ{>+;mG&_UyNDLLb;&I?N`7 zyFlF(70+xY61zGXaHI2WEDmN?rr$vVzviCuOrrVb{L3V#16-AJo7~Ph*+|v(RNTh z>O@qX<{HV;?wdG`c0?15wG#@@?2Mx`7A5o?vZF*ZIz;1JD-^Q301EbFgt}`SD&Hyk zW3sSTi%b^WaN8=l0Eykm*-bsz-S)uFBWYElF5qq; z_8{EdKv?2SCgS3-eDHE^WX$@p+eY8Xy+jBE+zcn8}phWlHF z)&Zxe678~czyk<(4mefx2U_~-{XiP~^g9PtRz9%*USiP7b?Aa>$637jaDoj8gmZ?UH12lD8J?%&ED z!^+G~G2DU;$?WwRhCQGn$Fj8646!rEp&8aZ9))L4z>(flCDWWp+WG>!AOq`Jomt&c zJV~riwyfbtl{<>3s6BB<@wWooC!g`iEnb2V`aZ_o+)M1My~I-?E@m(BG&FnWbm^jX z=8C<-f@uH!XO%ik9&dl- zM~irG#GvN3Yy4ybR5D!|-za^2`;g&z%iJ`U*A-ebZ>eaE91d&v50n z@+(O)H#Q8f0wY-KY82RtkyM39f!)Mxfh%lo7+x#*brxQbjp_@PdTYb*dSN8%eFNao zn`h;y>3;_xboo6BY{-a{)rHcnF5D{oqu@7MczP|cnw3C8998P89m1Q$C0XB_1?LXo zEs}bxFSX+FmOsTZsV`7b&fT}M3_sX%I~vf@J5YG$P8{7ikV5Pf?e+%m63*QgM|)|N z(*;jre-^a56J`1L01o-wh^r(v_W}Y%?nB|3`^CmuoMI~#c|bT1S{z*#Rm@Y=;~@c) z^>`R?=)p^>3R?3B08rym6rOoZT&x-t8mQs+10NUKUqWbhV1xa@CkUE_{lF)Ir<40r z;`nsH5qGlN0~vta52PvRVb8GSe&FBm15aw;Xk7|D`MW9_74sbNs`dlfdGdIk zWz~M*3uwc@yodsiXyE7^m2&LB#O?>aB!ZVi1oj-}<~Z#Kz9Mk7yOig+R|V(7%4=fx zx@D)YQjOI)5-(N`tw`$m*>{pY>FpRJpi0(udy$*QYIN6Knfm(V}%)^ z4dx99fKjRZFXeRmCi~*=^u2}O(YL6LEbsLFlgN!foPFO0e|Xb7C_MA7w5cojM&}U2 zp>Vq4PTqS$dEcV&qSGIgU?=Ybfs%daL%?+#^pQAy9B@KFd=g7rU!YJ1!l#70+n~=- z@yx$O61S1}Z$YXRsV`72nSIVOvXS=%T49MVQQ-L!99>*doZ_l#BkyZreiLA#8~;%y 
zS@O4lyN$f>ME>6}`S%Irltjh*AcZ{G$eRZhJW;|X_hjmPszk+86zS&&T=!rLh_J*{oXG>A%YjGuCiF}Wm-^hpYusGlmj|(1=3D{4J+mTJ!ZIh>@)BSr)uUd)-q3n0 zvz+Nus;dACCS4VUXI8_}nUq4UNt1T{ekGbzh{m^RXytQtK*4?up{^N+x;DI&=Y^6U z9P7dV%vzS$+Tyj2<)yEd%o_Q+pup^Ab&RYty1ld zcC|Y2u?NNAU7>>?V z6wq^I4HwOb5RGrEP-N`@DAW*=!t0S^z>Sbmp%V%ft8D;sTXN`0;un|#Kx5S9r zMV!Z2&NiZURV8Xn7g1x$m=QHjw2hWFJ+%66rXrw5y9?0DIPxEY^Uq?a=|t9)paGTT zaPCI93#{Ey@eJQRi6M;MQ_(&45HKmO*bQ>slV#jp*c^45DA6(VU-bZLI8xKEv)n+tXhHJ>#%bD&4Sp4)kOTpt^>s*tjLyj%c&mq1Zqx; zvFHXB!p!O#3A3Iz$Z(ZkerjbCMAZILWQtXUm;3G$V){&%IY5BWc$ledNw0-Y z+qcH|{DCWTAkdo@_fhdohdzdD4nixxNo9VGAGyW2aJ{O^; zr?2Xc($}1ETJ#wmLi+jwdlCBLlsQx+A+`OFP6aX!BSB*<%fkV}h#!H%Ge?RM-u*-T z`T`Y#Pa&M9z;?e8%25_Yw@OwIeSml_JZsXdZ%sBg_wI91W;%7X-^wvZOF7L$agQAx zBk;8JIvCC#zfV&B)YDqCIP%&RI2Qahxb7|N&i(ws;-}!uaZ(~QSmt=P=##s%;$yKN z%`zu|N7Lr6Q@!|lniXkI1if7JB>c!Nqh9oLE_yN*4OTO!&R%*>+yV z&IQn4?gQ<1EpZbViLQVa}wM%7+ zXo$Eu$6Q9_#<^>%%fS;7a|H@Kjw21~3f6uFj!-#bt`y2u7R5yjSJU`0Gjp}t6W#F| zfv2S>Vwh+3gTbj3uFiAE!Kg)XZFmX3tzNt+Y?T*@~Uf zo2+k8duTs5x|OTl8-94z{0_+2$7K*cv9o4HJlO7N|NitpQRwJtxRur4=(*>OyC_d<)G?nKux zt1e=0WOw|%;G1M&>;>P<#ue>e@CN8;@4d&|0u~61TTyuCPdHZY0N>_In31mS#c-P2 zNp=-tr*dQX4m3xJ+oGohvKz2H?Ij)#T<)z@W@qDDWtf7}-&y=t^bY7S1~s zM;Ta};_cKC^4S&#PshaSw`3Q?N((F_$nK;fAW#lxyVft4zJB%F^ejy~7c;|YaI zeIn#!r9K55O7YWgJ@PW20TF8c3x#L?Ev{BgDix^dcJMzJ+7}@-JNX4W_+Jt<3p@B< z0Z%*lUyI{60Y~)hKjJa>cJQ~DgD=!I+3(V6(&4^64ZukFmL>P?zrzna;6#<(2HW?l zXlzU$T(MN|+q28$F%Mx?`}Xsq4Z|@X3ViYkM;AquV}~Pl-+loR^bHYQM=9>Cl+(Wb zf&y1NO>mKB`U%dJ(uKsX#vkD@?0#@+uue^b_r+(2vMpWv!XlLX3%ayfnBDPj z4Hi-VBlarqyDusPo3HH~$Sj67jNsxZJhOxl>XZiVBfw;|uD_X*bIP<7h3m+7CjR<Nb=Wjg;_=yF zvpi+#ZX#h%(S1|WtU$~cn)=~WnvKLAx@Rs|3~yBZ3-Mh-{BOhIOYol>gXU|gNQo95?dG|d|LU0{FpkZ9-BfwmiK zq9q&tXk>vSL>1OzX)n*L{mj~E!AP%z!ZYjQ=wg+cC{~*m^p!NVwn+@dFmI_?HS+0deB9oJ5Y!A<%` z0wvz0Z!GXME&FCT^v(L}Y^fQ;Y^EyLdNMw1fHx<^E*ix*&g!k4|3%RLxz%*vz6o@R zxo_VT&3LX%irTA#au;>$zPUg>-n#Ra$$oy+3 ztNm$+vml0yJ-}J&St#)fxB-vprP-Q2^zX^Hk>zktzAYP9o1=U3Az*<(*bW6A&cm_t zo_u>>!VIl>&cQI5VdQj`y}hfvQy&gUt+JU%;1?drQ{BF|FFM{BZTb(ceSDMX-f*O7 zB6CM@_q`)Y>}vy~EX_`U!U#K~z(=yg%Ekc|vQq)?0!ItJO9)=6iEi4)2%M-1W);3W zXIHTr8(|e(X^$gl7Ou1#fu}3&CUM*?;E0ps?&9&&y3(c_@JgEo;CYvqHoDT@13&P^ zEF6O??Y&g7C-=RHSCt{lE|bT2mQ`2U`=AX&+Kd9v)#2#Q-jvhBm3Br16G8-bZgJkG zEA1A66UW%J3eGF-tk_Mo?9zv0e%jr@0rgaI_)9qz-*R;eljVg+eFDcd*X*A$`3%03 z2Y4PIj^OKbcw!;b>T3w4H`t|$;Cnd@W)k$l7_hIu>{&C%9{QITc@08j(8`zl1tK^8 zWCm$Jr~?mfL*bdpQogR>j~v~~&P&Y7Z5K+1MbRaS9a&B$`pitHK#{Hovtto(W#t-i zDg~S{iFC!%))y$0!PiZ=%gWs!70*l&iM=ffy`uI2HCJmDU9&1RETL z0xy8X(M2%@DT=G|aXn#93oy}vgH=fudkEmVW{s-MtZdbD=;5#6dMQ$x# zozK{HV13kikpE5QhMJl(!?I?V9+=VxNzbw$aF-dDy|@S@JA)0)BYrM4|sI zI#*xdc;Ff_d;&O0`d4>%99ZcHu)K~%a1)7gC zPGf0TZF_VLb2<<)?VW+bGiT!HradZVr@bOH#kFMcI;yiQ$=M<~$C7B>CshlVr3T#- zs+U;Yx3z1`xqzeJ=ZX4!pPC;DD|;wAc!40*7A$x0LcpOCU(@M4+gyY;^yOj{p1DK} z?6oz8sV`0aoFe8@wSh$uEPnKuFqc6*biEt}o+QPQvHU+#B^Wa-&0}w+>Z>j@q~Mjl zg6AeqHg4+v0p=RVdKS(K^qi}tYt(bxRC6`pnrg05*RK7!bgjO?v8bA8u9NoFogKMT zUrqwo6X+(I8&JXb(1c%?PR+tV(wM)GdR5ouT5nUOp7V|YGF zpql|6Lj}nZ!mmrOHH!nmS>P|RGU^Mo55+vevQd*t+$<0tS@R@t5mry3z$fZ(bTa|f zi=PSpYDu0E$=@tVc_yF|<(c4FA^qJ)ip~Vj2~usYa#x-Qym}^h0c}wEMHF}hS`6$= zKw(ug!OpWb6U?%|nwPBbmnHlaD?B~7r67W>87m^VB85Ncn$f+BJ zn_p(rnbI648!oF&i#H)UX4B#=G$Z*#dTHQYceG>iw%U8#u>gim@-pw>CpIitgf}eS z6%TG$yoX<&d0+Lq^nhO3pq2@OR2D03_k;2$ygxDP18Y8Dhy3k`4`ml@M|{M_W#?!+ z;$yHtRDXiPGoRvExgGJDFJVTyu2+L#{zby8;PsVT5C2AUZNbk0{2Ud}e8Hyb96&k% zrg+=yf=l`PQmEC_RypC~D^gXY{C$lUbnhD!NQ4o&owO($lNLgiDSzJz{+)%-Zu%YE zu0m~_^7mgcP1gB)z@hW(ru_B6J|H?c4+=b&j-xw~uuHasO!=En@cAvgpuwruR@*uY 
z2qRhBzJNp9UOK~qpn{V9PJ{XXgD5R=%N95C9xz+Wp2^Wl7CV2c#0kJH%L~O1tJzJZ;-4ZgvxxVHPMDKTnh!>Uo3>`d??x%6C2;ww~o-( z_0i(;p=tqx>ygGjJD_x^{-lUWhbk-!S3SD09F9GwKGl(5PpMCZ6~H6+LSaP|nve=B z@z=^Y=$Zg7Tn`?FRndTs48U*C45V7fMv~G9<_6VmFbv}F!Wv{k;gxwv>!GDG4~dpx zoUG5XA`fW;w7|j}qVUW{IJzjI4vLaOEw$v?gV%a=>5YZFiAC01?%YM0b`{De`jowd zONFlCQo4x4?#9?L?Y@={vjO)INS8+r7=dJesZ9&TM;Ys=E`Grr|M`UHME~&7|&Z3Hg6J zfO)*FM@nqe)(bjY??}?dA4aX6z#BH&83j^jq)A=D*OgW&+i0{Nq9#}S8lSLbA$EPtI6Y)Vj@?^}rVhVm-)d$*UsOs>!q*wJYs{zY#x|HS=yUwg?J8?ThpxsA z-B^+~{-3heICL2v)ri6~O)^kj!8aj&$a)l4GZfWt3hxue`UGl5<67d~2KDJ*=nvT><7TKLF<1-yPD3cUCP$I69|oG)R< z><`OT_CC9E0VEH;Huth1$@{5pKQ0j-QMv{eK>Xh8-n>nyk+~~KDw<3Z`=N<|lW9jI zy4isOFKHJC>)aHt$abqSMWGmrqQ|dNTctMFU&DIvu?mfXC7F`&la=ZM97^$Nt`IxZ zjb^B|KMJHTiHB8-T1YKkSr0&r93YgbAr$NE&NRvoJW$|7E$nqVXRw3B?bji0L4awDMR*!JLVP%l9f@Da?or*ob)y&4Sfv%=XY2sxvS)rVb6I5%%~9;1KcP*R z%`l-I&Bn#&XhJ&%ED%!1qCln*j+GPI@xFu^dO6AL4^Zr^b^@5&S?xrLKS_1_n!C#G zV}R){nA&_hXlgrI_=)jrP9ceXr+v9<}|UfUQb0T=C{*@a)w3Wg$gGm zT+>!-6ijVr3N=}&vjB%uWlrX7{Dv~;puj8dadayOlwC2aohOv@LnzkmomDieT_A9x z0_9olLUFq&#I0&pyI7#aS?v;mr=^dCQ5tvInN!-Z?VHC88#{E^=*A%f_KWDf zh@c)!3g=^QFyFoQi<$3w=&rejo%JWbYh_JLe%G;a(K(v@t_KT5-VG>_-h*T110CO`*8fV7a z+yXQh=T;PW4TyN!u%U_-bL4G8x!t0q@x(Ld$c?L&3ueeWgqW<~oq$6>$H&}-IuyAZ z1zy%KVyg({R?Lt02<6@oiVYKI9?g&U37jatJU`wqZV!aGRn3nN3Y0iMJ|yt8^uaJl zi|H&iyCpf(JcUn}WX)_oRygaP7(7_Gf@a2tp;64t_z0Rk^QhE~of#h!C^R#g$I()C z$DTVwCpG`+!oLvVtOASh-1vkzac+DPzdZAl>UHT0dKX{`=JeO%nza@;Ab66d{4fAM z8ECq@av>0s#**F~G@xRomH7cQ6eit&K-pi$4RgF5&C~3sKi~dUBZT?(88)skNAvC9 zzygu{EDC&B0msVu_Bmg|jP#JX;ST0`@{65sUjW3;w=d!s5|dQ79}bJR2}YkDi>cN( zj;7j|M3*?1n3qXnzh4&5*1Q4?nB-Lyo_S5|YzR^@O>Q|p)A+jJZ&-MGZVZvH84-}bfT2TLlnWm?=jvCQfV>?k+b-eqZDt6M>9-U9#zdmn{o zKETmkEmJ5A2ETQ0xcN{xA6cCAQc%M^n^&F@sLRK~sP0%W9+2KLc(L><0RW3mok0uCJl8uJwzAogn%_&Btvtyl`F6#F0H zd~0#ii-hff>5tOC6D(Q!e*uT|YEtt(+Mz@rdhLpv@k~r#U_h}2QFx}G*jejPjY`E95>Ac9Q5VA-Y*c4q!IITk z1aRLvVN$aw+M&c^DDY_mF|kTeWS|6>teD5LgwU1@q1g#1$YWWGpjpUcSsHlS#$QGp zmkl_gE6a(;Pb-gw(=_u~XaI)(@+_IhvI2e}TqwvfYIJ#h_ zI0bXnBgb0`bE^Oo-Pl@{WXanA?w&>6R^&s%&JM22#@LPen0>|(G>AIWsU zp8j4fxZ4?!$_z+N7_aAaca*fKa_k3{+lg@ZF@>E`L0W-G?8g);6l7;d3#fbL!U}j~ zcY5$Db58y2A-V3tGVZEx3;-DJT~T;uERJrtDcTNq^;d$%S&~MPG=)eg3}M1Vx7`HX zJpe*cz9p=j!&X=ex>|)pFY(ga1Z{2h0CwmVUWM{~al*U1!oA%S#O_lHd*L_sktrOR zYGg~RjaNN73e=nS+J~iGDv>(PsKZSAqQDL@j?PSEZ_SkS<(~+mLNj zB|081WG4fzkZl)jhowy)3#&wLJwl)G3Z8#MqDt^u0-oe2=8;(AzS`Bz6fvZ~5D;AV z4yZ|b4*@^XN7T^E37F_qQhawN^n{(JXB7!|@oiA?Oi3j6g+I~W&|SNPkrd>#30Zfu zOuU`iA5i$%6cnC007vJ-R3bjor&^K&MRHJxgcAKo|FwW#0ED*FSP8OK6C$0t8T#m9 z3p~Xf43b`lyX*HMAa>z?D1PHZ+|&T!PPVk);i^a7i^|h-N3gVe^<|xoL?aCL8x(jy z0*=mLl+km{PZ!P6AsXLep_o4gP_RE%sK>>j=3yc}NB*Put~uW7bAt3a(dv`_QB}aq zVa=2{bDSjQPPWR~nd1~yqC@Luj^6^Vnd4N^o@QzFu$tBgW_hw0keZUqx8#D%#bDCl z!ly$2^aNF?HNET%!d>W}i3&a^Arkw+zhKGYY&9lDJNrRy=dcVfT%3yrIMjJ4Jaayd z&R;1iKEN-qBo~V0q7Vs%_yK;gfR_Y7gwv(0^e(_J14*v~+;#PG5W4`s0>AN50cwB% zCtC*im8wU*fXdTYSFx-P@T<`XgI$BdGuPti3`Q9}2l#cOxjsbWTPzgdHvkIuzZ2^3 z<52ne^~3=0+270`EcZW(`;C^nGPGxtA%f3#Au;%FlBk=lC>wmYs1h~13%*+cSMdEw zw6|H>bW`}Wjg_*VV$h~?>vUZ`4+iI7_kg;h@b(0qs6Jic4#Hg&-ieB5?h;8|WEqa0 zb+=$i!N`7*>7QA~tmEB-cKE=(C_Hl?j?Tp>I6fxtwaJm8j`mT)qOh;__9|zGi9DkD~9v^>4-EamB|2$038a9i8a# zKbwPWaa;}C?^rV>yy9_<^0W7PhgU<*Q2RPQ{~f?V4D(|msoFJItkGZZ`-{88t|5jl zwuywd1DCoxJ39>CP!_pt8~~y5C#=kDhQN|> zA3MJ0Q{eY{dT<^848(4F_!oZT?fVpu>49u%z|U2W`Y1K0>AqlT^SOM5zC;U5^%V-w ze2t?s6$SJ>H+&4UdXVG9XISS$T= z>H%gxJ4lH$-@NMEe70|P=9^!Ys8?+ZYtX9}09-R)U(qgTY4r$1c#A1+zS9LiUpCs( zf+vzR=ONj|bl@$d+!Bc0Q>fv0!toSLDcd%&A&k-7ns4dS^-8}4joCSByb$4ThN?jY z`(`4s*DGPI8CHu3mNYN1*JQdVOMAT%W?&XWJ7QpQ6nHipM;Brg96uW^X-Sq6$TjNnr&s(qkcopX|PpT 
[GIT binary patch data omitted: this commit also adds generated Sphinx build artifacts under doc/build/doctrees/ (including improver.grids.doctree and an ensemble-calibration doctree) as binary blobs, followed by the opening boilerplate of a generated HTML page titled "improver.ensemble_calibration.ensemble_calibration — Improver documentation".]


Source code for improver.ensemble_calibration.ensemble_calibration

+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""
+This module defines all the "plugins" specific for ensemble calibration.
+
+"""
+import copy
+import numpy as np
+import random
+from scipy import stats
+from scipy.optimize import minimize
+from scipy.stats import norm
+import warnings
+
+import cf_units as unit
+import iris
+
+from improver.ensemble_calibration.ensemble_calibration_utilities import (
+    convert_cube_data_to_2d, concatenate_cubes, rename_coordinate,
+    check_predictor_of_mean_flag)
+
+
+
+class ContinuousRankedProbabilityScoreMinimisers(object):
+    """
+    Minimise the Continuous Ranked Probability Score (CRPS).
+
+    Calculate the optimised coefficients for minimising the CRPS based on
+    assuming a particular probability distribution for the phenomenon being
+    minimised.
+
+    The number of coefficients that will be optimised depends upon the
+    initial guess.
+
+    Minimisation is performed using the Nelder-Mead algorithm for 200
+    iterations to limit the computational expense.
+    Note that the BFGS algorithm was initially trialled, but had a bug
+    when compared against reference results generated in R.
+
+    """
+
+    # Maximum iterations for minimisation using Nelder-Mead.
+    MAX_ITERATIONS = 200
+
+    # The tolerated percentage change for the final iteration when
+    # performing the minimisation.
+    TOLERATED_PERCENTAGE_CHANGE = 5
+
+    # An arbitrary value set if an infinite value is detected
+    # as part of the minimisation.
+    BAD_VALUE = np.float64(999999)
+
+    def __init__(self):
+        # Dictionary of the available minimisation functions, keyed by the
+        # name of the distribution each supports.
+        self.minimisation_dict = {
+            "gaussian": self.normal_crps_minimiser,
+            "truncated gaussian": self.truncated_normal_crps_minimiser}
+
+    def crps_minimiser_wrapper(
+            self, initial_guess, forecast_predictor, truth, forecast_var,
+            predictor_of_mean_flag, distribution):
+        """
+        Function to pass a given minimisation function to the scipy minimize
+        function to estimate optimised values for the coefficients.
+
+        Parameters
+        ----------
+        initial_guess : List
+            Initial guess for the coefficients.
+            Order of coefficients is [c, d, a, b].
+        forecast_predictor : Iris cube
+            Cube containing the fields to be used as the predictor,
+            either the ensemble mean or the ensemble members.
+        truth : Iris cube
+            Cube containing the field to be used as truth.
+        forecast_var : Iris cube
+            Cube containing the ensemble variance.
+        predictor_of_mean_flag : String
+            String to specify the input to calculate the calibrated mean.
+            Currently the ensemble mean ("mean") and the ensemble members
+            ("members") are supported as the predictors.
+        distribution : String
+            String used to access the appropriate minimisation function
+            within self.minimisation_dict.
+
+        Returns
+        -------
+        optimised_coeffs : List
+            List of optimised coefficients.
+            Order of coefficients is [c, d, a, b].
+
+        """
+        def calculate_percentage_change_in_last_iteration(allvecs):
+            """
+            Calculate the percentage change that has occurred within
+            the last iteration of the minimisation. If the percentage change
+            between the last iteration and the last-but-one iteration exceeds
+            the threshold, a warning message is printed.
+
+            Parameters
+            ----------
+            allvecs : List
+                List of numpy arrays containing the optimised coefficients,
+                after each iteration.
+            """
+            last_iteration_percentage_change = np.absolute(
+                (allvecs[-1] - allvecs[-2]) / allvecs[-2])*100
+            if (np.any(last_iteration_percentage_change >
+                       self.TOLERATED_PERCENTAGE_CHANGE)):
+                np.set_printoptions(suppress=True)
+                msg = ("\nThe final iteration resulted in a percentage "
+                       "change that is greater than the accepted threshold "
+                       "of 5% i.e. {}. "
+                       "\nA satisfactory minimisation has not been achieved. "
+                       "\nLast iteration: {}, "
+                       "\nLast-but-one iteration: {}"
+                       "\nAbsolute difference: {}\n").format(
+                           last_iteration_percentage_change, allvecs[-1],
+                           allvecs[-2], np.absolute(allvecs[-2]-allvecs[-1]))
+                warnings.warn(msg)
+
+        try:
+            minimisation_function = self.minimisation_dict[distribution]
+        except KeyError as err:
+            msg = ("Distribution requested {} is not supported in {}. "
+                   "Error message: {}".format(
+                       distribution, self.minimisation_dict, err))
+            raise KeyError(msg)
+
+        # Ensure predictor_of_mean_flag is valid.
+ check_predictor_of_mean_flag(predictor_of_mean_flag) + + if predictor_of_mean_flag.lower() in ["mean"]: + forecast_predictor_data = forecast_predictor.data.flatten() + truth_data = truth.data.flatten() + forecast_var_data = forecast_var.data.flatten() + elif predictor_of_mean_flag.lower() in ["members"]: + truth_data = truth.data.flatten() + forecast_predictor_data = convert_cube_data_to_2d( + forecast_predictor) + forecast_var_data = forecast_var.data.flatten() + + initial_guess = np.array(initial_guess, dtype=np.float32) + forecast_predictor_data = forecast_predictor_data.astype(np.float32) + forecast_var_data = forecast_var_data.astype(np.float32) + truth_data = truth_data.astype(np.float32) + sqrt_pi = np.sqrt(np.pi).astype(np.float32) + + optimised_coeffs = minimize( + minimisation_function, initial_guess, + args=(forecast_predictor_data, truth_data, + forecast_var_data, sqrt_pi, predictor_of_mean_flag), + method="Nelder-Mead", + options={"maxiter": self.MAX_ITERATIONS, "return_all": True}) + if not optimised_coeffs.success: + msg = ("Minimisation did not result in convergence after " + "{} iterations. \n{}".format( + self.MAX_ITERATIONS, optimised_coeffs.message)) + warnings.warn(msg) + calculate_percentage_change_in_last_iteration(optimised_coeffs.allvecs) + return optimised_coeffs.x
+ +
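Note: the wrapper above relies on two scipy Nelder-Mead options, a `maxiter` cap and `return_all=True`, which records the coefficient vector after every iteration in `result.allvecs`; that list is what the percentage-change check consumes. A minimal, self-contained sketch of the same pattern, using a toy quadratic objective rather than the CRPS functions defined in this module:

    import numpy as np
    from scipy.optimize import minimize

    def toy_objective(coeffs):
        # Convex stand-in for a CRPS function.
        return np.sum((coeffs - np.array([1.0, 2.0])) ** 2)

    result = minimize(
        toy_objective, np.array([0.0, 0.0]), method="Nelder-Mead",
        options={"maxiter": 200, "return_all": True})
    print(result.x)             # optimised values
    print(len(result.allvecs))  # one coefficient vector per iteration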
[docs] def normal_crps_minimiser( + self, initial_guess, forecast_predictor, truth, forecast_var, + sqrt_pi, predictor_of_mean_flag): + """ + Minimisation function to calculate coefficients based on minimising the + CRPS for a normal distribution. + + Scientific Reference: + Gneiting, T. et al., 2005. + Calibrated Probabilistic Forecasting Using Ensemble Model Output + Statistics and Minimum CRPS Estimation. + Monthly Weather Review, 133(5), pp.1098-1118. + + Parameters + ---------- + initial_guess : List + List of optimised coefficients. + Order of coefficients is [c, d, a, b]. + forecast_predictor : Numpy array + Data to be used as the predictor, + either the ensemble mean or the ensemble members. + truth : Numpy array + Data to be used as truth. + forecast_var : Numpy array + Ensemble variance data. + sqrt_pi : Numpy array + Square root of Pi + predictor_of_mean_flag : String + String to specify the input to calculate the calibrated mean. + Currently the ensemble mean ("mean") and the ensemble members + ("members") are supported as the predictors. + + Returns + ------- + result : Float + Minimum value for the CRPS achieved. + + """ + if predictor_of_mean_flag.lower() in ["mean"]: + beta = initial_guess[2:] + elif predictor_of_mean_flag.lower() in ["members"]: + beta = np.array([initial_guess[2]]+(initial_guess[3:]**2).tolist()) + + new_col = np.ones(truth.shape) + all_data = np.column_stack((new_col, forecast_predictor)) + mu = np.dot(all_data, beta) + sigma = np.sqrt( + initial_guess[0]**2 + initial_guess[1]**2 * forecast_var) + xz = (truth - mu) / sigma + normal_cdf = norm.cdf(xz) + normal_pdf = norm.pdf(xz) + result = np.nansum( + sigma * (xz * (2 * normal_cdf - 1) + 2 * normal_pdf - 1 / sqrt_pi)) + if not np.isfinite(np.min(mu/sigma)): + result = self.BAD_VALUE + return result
+ +
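For reference, the closed-form Gaussian CRPS that the method above implements (Gneiting et al., 2005), with standardised residual z = (y - mu)/sigma, is

    \mathrm{CRPS}\bigl(\mathcal{N}(\mu,\sigma^{2});\, y\bigr)
        = \sigma\left[ z\bigl(2\Phi(z)-1\bigr) + 2\varphi(z)
        - \frac{1}{\sqrt{\pi}} \right],
    \qquad z = \frac{y-\mu}{\sigma},

where Phi and varphi are the standard normal CDF and PDF. The code evaluates this pointwise and aggregates over the grid with np.nansum.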
[docs] def truncated_normal_crps_minimiser( + self, initial_guess, forecast_predictor, truth, forecast_var, + sqrt_pi, predictor_of_mean_flag): + """ + Minimisation function to calculate coefficients based on minimising the + CRPS for a truncated_normal distribution. + + Scientific Reference: + Thorarinsdottir, T.L. & Gneiting, T., 2010. + Probabilistic forecasts of wind speed: Ensemble model + output statistics by using heteroscedastic censored regression. + Journal of the Royal Statistical Society. + Series A: Statistics in Society, 173(2), pp.371-388. + + Parameters + ---------- + initial_guess : List + List of optimised coefficients. + Order of coefficients is [c, d, a, b]. + forecast_predictor : Numpy array + Data to be used as the predictor, + either the ensemble mean or the ensemble members. + truth : Numpy array + Data to be used as truth. + forecast_var : Numpy array + Ensemble variance data. + sqrt_pi : Numpy array + Square root of Pi + predictor_of_mean_flag : String + String to specify the input to calculate the calibrated mean. + Currently the ensemble mean ("mean") and the ensemble members + ("members") are supported as the predictors. + + Returns + ------- + result : Float + Minimum value for the CRPS achieved. + + """ + if predictor_of_mean_flag.lower() in ["mean"]: + beta = initial_guess[2:] + elif predictor_of_mean_flag.lower() in ["members"]: + beta = np.array([initial_guess[2]]+(initial_guess[3:]**2).tolist()) + + new_col = np.ones(truth.shape) + all_data = np.column_stack((new_col, forecast_predictor)) + mu = np.dot(all_data, beta) + sigma = np.sqrt( + initial_guess[0]**2 + initial_guess[1]**2 * forecast_var) + xz = (truth - mu) / sigma + normal_cdf = norm.cdf(xz) + normal_pdf = norm.pdf(xz) + x0 = mu / sigma + normal_cdf_0 = norm.cdf(x0) + normal_cdf_root_two = norm.cdf(np.sqrt(2) * x0) + result = np.nansum( + (sigma / normal_cdf_0**2) * + (xz * normal_cdf_0 * (2 * normal_cdf + normal_cdf_0 - 2) + + 2 * normal_pdf * normal_cdf_0 - + normal_cdf_root_two / sqrt_pi)) + if not np.isfinite(np.min(mu/sigma)) or (np.min(mu/sigma) < -3): + result = self.BAD_VALUE + return result
+ + +
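Similarly, the truncated-normal method above implements the closed form of Thorarinsdottir and Gneiting (2010). Writing z = (y - mu)/sigma and p = Phi(mu/sigma), the quantity summed per grid point is

    \mathrm{CRPS} = \frac{\sigma}{p^{2}}\left[
        z\,p\,\bigl(2\Phi(z) + p - 2\bigr) + 2\varphi(z)\,p
        - \frac{1}{\sqrt{\pi}}\,
          \Phi\!\left(\sqrt{2}\,\frac{\mu}{\sigma}\right)\right],

which matches the `normal_cdf_0` and `normal_cdf_root_two` terms in the code.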
+class EstimateCoefficientsForEnsembleCalibration(object):
+    """
+    Class focussing on estimating the optimised coefficients for ensemble
+    calibration.
+    """
+    # Logical flag for whether initial guess estimates for the coefficients
+    # will be estimated using linear regression i.e.
+    # ESTIMATE_COEFFICIENTS_FROM_LINEAR_MODEL_FLAG = True, or whether default
+    # values will be used instead i.e.
+    # ESTIMATE_COEFFICIENTS_FROM_LINEAR_MODEL_FLAG = False.
+    ESTIMATE_COEFFICIENTS_FROM_LINEAR_MODEL_FLAG = True
+
+    def __init__(self, distribution, desired_units,
+                 predictor_of_mean_flag="mean"):
+        """
+        Create an ensemble calibration plugin that, for Nonhomogeneous
+        Gaussian Regression, calculates coefficients based on historical
+        forecasts and applies the coefficients to the current forecast.
+
+        Parameters
+        ----------
+        distribution : String
+            Name of distribution. Assume that the current forecast can be
+            represented using this distribution.
+        desired_units : String or cf_units.Unit
+            The unit that you would like the calibration to be undertaken
+            in. The current forecast, historical forecast and truth will be
+            converted as required.
+        predictor_of_mean_flag : String
+            String to specify the input to calculate the calibrated mean.
+            Currently the ensemble mean ("mean") and the ensemble members
+            ("members") are supported as the predictors.
+
+        """
+        self.distribution = distribution
+        self.desired_units = desired_units
+        self.predictor_of_mean_flag = predictor_of_mean_flag
+        self.minimiser = ContinuousRankedProbabilityScoreMinimisers()
+
+        import imp
+        try:
+            # find_module raises ImportError if statsmodels is unavailable.
+            imp.find_module('statsmodels')
+            statsmodels_found = True
+            import statsmodels.api as sm
+            self.sm = sm
+        except ImportError:
+            statsmodels_found = False
+            if predictor_of_mean_flag.lower() in ["members"]:
+                msg = (
+                    "The statsmodels module cannot be imported. "
+                    "Will not be able to calculate an initial guess from "
+                    "the individual ensemble members. "
+                    "A default initial guess will be used without "
+                    "estimating coefficients from a linear model.")
+                warnings.warn(msg)
+        self.statsmodels_found = statsmodels_found
+
+    def __str__(self):
+        result = ('<EstimateCoefficientsForEnsembleCalibration: '
+                  'distribution: {}; '
+                  'desired_units: {}; '
+                  'predictor_of_mean_flag: {}; '
+                  'minimiser: {}>')
+        return result.format(
+            self.distribution, self.desired_units,
+            self.predictor_of_mean_flag, self.minimiser)
+
[docs] def compute_initial_guess( + self, truth, forecast_predictor, predictor_of_mean_flag, + estimate_coefficients_from_linear_model_flag, no_of_members=None): + """ + Function to compute initial guess of the a and beta components of the + EMOS coefficients by linear regression of the forecast predictor + and the truth, if requested. Otherwise, default values for a and b + will be used. + + Default values have been chosen based on Figure 8 in the + ensemble calibration documentation in + https://exxreldocs:8099/display/TEPPV/Science+Plugin+Documents + or + http://www-nwp/~gevans/reports/EMOS_report_20170327.pdf + + Parameters + ---------- + truth : Iris cube + Cube containing the field, which will be used as truth. + forecast_predictor : Iris cube + Cube containing the fields to be used as the predictor, + either the ensemble mean or the ensemble members. + predictor_of_mean_flag : String + String to specify the input to calculate the calibrated mean. + Currently the ensemble mean ("mean") and the ensemble members + ("members") are supported as the predictors. + estimate_coefficients_from_linear_model_flag : Logical + Flag whether coefficients should be estimated from + the linear regression, or static estimates should be used. + no_of_members : Int + Number of members, if ensemble members are to be used as + predictors. Default is None. + + Returns + ------- + initial_guess : List + List of coefficients to be used as initial guess. + Order of coefficients is [c, d, a, b]. + + """ + + if (predictor_of_mean_flag.lower() in ["mean"] and + not estimate_coefficients_from_linear_model_flag): + initial_guess = [1, 1, 0, 1] + elif (predictor_of_mean_flag.lower() in ["members"] and + not estimate_coefficients_from_linear_model_flag): + initial_guess = [1, 1, 0] + np.repeat(1, no_of_members).tolist() + elif estimate_coefficients_from_linear_model_flag: + if predictor_of_mean_flag.lower() in ["mean"]: + # Find all values that are not NaN. + truth_not_nan = ~np.isnan(truth.data.flatten()) + forecast_not_nan = ~np.isnan(forecast_predictor.data.flatten()) + combined_not_nan = ( + np.all( + np.row_stack([truth_not_nan, forecast_not_nan]), + axis=0)) + if not any(combined_not_nan): + gradient, intercept = ([np.nan, np.nan]) + else: + gradient, intercept, _, _, _ = ( + stats.linregress( + forecast_predictor.data.flatten()[ + combined_not_nan], + truth.data.flatten()[combined_not_nan])) + initial_guess = [1, 1, intercept, gradient] + elif predictor_of_mean_flag.lower() in ["members"]: + if self.statsmodels_found: + truth_data = truth.data.flatten() + forecast_data = np.array( + convert_cube_data_to_2d( + forecast_predictor, transpose=False)) + # Find all values that are not NaN. + truth_not_nan = ~np.isnan(truth_data) + forecast_not_nan = ~np.isnan(forecast_data) + combined_not_nan = ( + np.all( + np.row_stack([truth_not_nan, forecast_not_nan]), + axis=0)) + val = self.sm.add_constant( + forecast_data[:, combined_not_nan].T) + est = self.sm.OLS(truth_data[combined_not_nan], val).fit() + intercept = est.params[0] + gradient = est.params[1:] + initial_guess = [1, 1, intercept]+gradient.tolist() + else: + initial_guess = ( + [1, 1, 0] + np.repeat(1, no_of_members).tolist()) + return initial_guess
+ +
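The mean-predictor branch of compute_initial_guess reduces to a simple least-squares fit of truth against the ensemble mean. A minimal sketch with synthetic, purely illustrative data:

    import numpy as np
    from scipy import stats

    # Synthetic ensemble-mean forecasts and matching truths.
    forecast_mean = np.array([2.0, 4.0, 6.0, 8.0])
    truth = np.array([2.5, 3.5, 6.5, 7.5])

    gradient, intercept, _, _, _ = stats.linregress(forecast_mean, truth)
    # Order of coefficients is [c, d, a, b], i.e. [gamma, delta, a, beta].
    initial_guess = [1, 1, intercept, gradient]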
[docs] def estimate_coefficients_for_ngr( + self, current_forecast, historic_forecast, truth): + """ + Using Nonhomogeneous Gaussian Regression/Ensemble Model Output + Statistics, estimate the required coefficients from historical + forecasts. + + The main contents of this method is: + 1. Metadata checks to ensure that the current forecast, historic + forecast and truth exist in a form that can be processed. + 2. Loop through times within the concatenated current forecast cube. + a. Extract the desired forecast period from the historic forecasts + to match the current forecasts. Apply unit conversion to ensure + that historic forecasts have the desired units for calibration. + b. Extract the relevant truth to co-incide with the time within + the historic forecasts. Apply unit conversion to ensure + that the truth has the desired units for calibration. + c. Calculate mean and variance. + d. Calculate initial guess at coefficient values by performing a + linear regression, if requested, otherwise default values are + used. + e. Perform minimisation. + + Parameters + ---------- + current_forecast : Iris Cube or CubeList + The cube containing the current forecast. + historical_forecast : Iris Cube or CubeList + The cube or cubelist containing the historical forecasts used for + calibration. + truth : Iris Cube or CubeList + The cube or cubelist containing the truth used for calibration. + + Returns + ------- + optimised_coeffs : Dictionary + Dictionary containing a list of the optimised coefficients + for each date. + coeff_names : List + The name of each coefficient. + + """ + def convert_to_cubelist(cubes, cube_type="forecast"): + """ + Convert cube to cubelist, if necessary. + + Parameters + ---------- + cubes : Iris Cube or Iris CubeList + Cube to be converted to CubeList. + cube_type : String + String to describe the cube, which is being converted to a + CubeList. + + """ + if not isinstance(cubes, iris.cube.CubeList): + cubes = iris.cube.CubeList([cubes]) + for cube in cubes: + if not isinstance(cube, iris.cube.Cube): + msg = ("The input data within the {} " + "is not an Iris Cube.".format(cube_type)) + raise ValueError(msg) + return cubes + + # Ensure predictor_of_mean_flag is valid. + check_predictor_of_mean_flag(self.predictor_of_mean_flag) + + # Setting default values for optimised_coeffs and coeff_names. + optimised_coeffs = {} + coeff_names = ["gamma", "delta", "a", "beta"] + + # Set default values for whether there are NaN values within the + # initial guess. + nan_in_initial_guess = False + + for var in [current_forecast, historic_forecast, + truth]: + if (isinstance(var, iris.cube.Cube) or + isinstance(var, iris.cube.CubeList)): + current_forecast_cubes = current_forecast + historic_forecast_cubes = historic_forecast + truth_cubes = truth + else: + msg = ("{} is not a Cube or CubeList." + "Returning default values for optimised_coeffs {} " + "and coeff_names {}.").format( + var, optimised_coeffs, coeff_names) + warnings.warn(msg) + return optimised_coeffs, coeff_names + + current_forecast_cubes = ( + convert_to_cubelist( + current_forecast_cubes, cube_type="current forecast")) + historic_forecast_cubes = ( + convert_to_cubelist( + historic_forecast_cubes, cube_type="historic forecast")) + truth_cubes = convert_to_cubelist(truth_cubes, cube_type="truth") + + if (len(current_forecast_cubes) == 0 or + len(historic_forecast_cubes) == 0 or len(truth_cubes) == 0): + msg = ("Insufficient input data present to estimate " + "coefficients using NGR. 
" + "\nNumber of current_forecast_cubes: {}" + "\nNumber of historic_forecast_cubes: {}" + "\nNumber of truth_cubes: {}".format( + len(current_forecast_cubes), + len(historic_forecast_cubes), len(truth_cubes))) + warnings.warn(msg) + return optimised_coeffs, coeff_names + + rename_coordinate( + current_forecast_cubes, "ensemble_member_id", "realization") + rename_coordinate( + historic_forecast_cubes, "ensemble_member_id", "realization") + + current_forecast_cubes = concatenate_cubes( + current_forecast_cubes) + historic_forecast_cubes = concatenate_cubes( + historic_forecast_cubes) + truth_cubes = concatenate_cubes(truth_cubes) + + for current_forecast_cube in current_forecast_cubes.slices_over( + "time"): + date = unit.num2date( + current_forecast_cube.coord("time").points, + current_forecast_cube.coord("time").units.name, + current_forecast_cube.coord("time").units.calendar)[0] + # Extract desired forecast_period from historic_forecast_cubes. + forecast_period_constr = iris.Constraint( + forecast_period=current_forecast_cube.coord( + "forecast_period").points) + historic_forecast_cube = historic_forecast_cubes.extract( + forecast_period_constr) + + # Extract truth matching the time of the historic forecast. + truth_constr = iris.Constraint( + forecast_reference_time=historic_forecast_cube.coord( + "time").points) + truth_cube = truth_cubes.extract(truth_constr) + + if truth_cube is None: + msg = ("Unable to calibrate for the time points {} " + "as no truth data is available." + "Moving on to try to calibrate " + "next time point.".format( + historic_forecast_cube.coord("time").points)) + warnings.warn(msg) + continue + + # Make sure inputs have the same units. + historic_forecast_cube.convert_units(self.desired_units) + truth_cube.convert_units(self.desired_units) + + if self.predictor_of_mean_flag.lower() in ["mean"]: + no_of_members = None + forecast_predictor = historic_forecast_cube.collapsed( + "realization", iris.analysis.MEAN) + elif self.predictor_of_mean_flag.lower() in ["members"]: + no_of_members = len( + historic_forecast_cube.coord("realization").points) + forecast_predictor = historic_forecast_cube + + forecast_var = historic_forecast_cube.collapsed( + "realization", iris.analysis.VARIANCE) + + # Computing initial guess for EMOS coefficients + # If no initial guess from a previous iteration, or if there + # are NaNs in the initial guess, calculate an initial guess. + if "initial_guess" not in locals() or nan_in_initial_guess: + initial_guess = self.compute_initial_guess( + truth_cube, forecast_predictor, + self.predictor_of_mean_flag, + self.ESTIMATE_COEFFICIENTS_FROM_LINEAR_MODEL_FLAG, + no_of_members=no_of_members) + + if np.any(np.isnan(initial_guess)): + nan_in_initial_guess = True + + if not nan_in_initial_guess: + # Need to access the x attribute returned by the + # minimisation function. + optimised_coeffs[date] = ( + self.minimiser.crps_minimiser_wrapper( + initial_guess, forecast_predictor, + truth_cube, forecast_var, + self.predictor_of_mean_flag, + self.distribution.lower())) + initial_guess = optimised_coeffs[date] + else: + optimised_coeffs[date] = initial_guess + + return optimised_coeffs, coeff_names
+ + +
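A minimal usage sketch of this plugin, assuming the three input cubes have been prepared elsewhere (current_forecast, historic_forecasts and truths are hypothetical variables, not defined in this module):

    plugin = EstimateCoefficientsForEnsembleCalibration(
        distribution="gaussian", desired_units="Celsius",
        predictor_of_mean_flag="mean")
    optimised_coeffs, coeff_names = plugin.estimate_coefficients_for_ngr(
        current_forecast, historic_forecasts, truths)
    # optimised_coeffs maps each date to a list [gamma, delta, a, beta].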
+class ApplyCoefficientsFromEnsembleCalibration(object):
+    """
+    Class to apply the optimised EMOS coefficients to future dates.
+
+    """
+    def __init__(
+            self, current_forecast, optimised_coeffs, coeff_names,
+            predictor_of_mean_flag="mean"):
+        """
+        Create an ensemble calibration plugin that, for Nonhomogeneous
+        Gaussian Regression, applies coefficients created from historical
+        forecasts to the current forecast.
+
+        Parameters
+        ----------
+        current_forecast : Iris Cube or CubeList
+            The Cube or CubeList containing the current forecast.
+        optimised_coeffs : Dictionary
+            Dictionary containing a list of the optimised coefficients
+            for each date.
+        coeff_names : List
+            The name of each coefficient.
+        predictor_of_mean_flag : String
+            String to specify the input to calculate the calibrated mean.
+            Currently the ensemble mean ("mean") and the ensemble members
+            ("members") are supported as the predictors.
+
+        """
+        self.current_forecast = current_forecast
+        self.optimised_coeffs = optimised_coeffs
+        self.coeff_names = coeff_names
+        self.predictor_of_mean_flag = predictor_of_mean_flag
+
+    def _find_coords_of_length_one(self, cube, add_dimension=True):
+        """
+        Function to find all coordinates with a length of 1.
+
+        Parameters
+        ----------
+        cube : Iris cube
+            Cube from which the length one coordinates will be taken.
+        add_dimension : Logical
+            If True, a dimension of 0 is added to each coordinate, so a
+            tuple of (coordinate, dimension) is appended instead.
+
+        Returns
+        -------
+        length_one_coords : List or List of tuples
+            List of length one coordinates or list of tuples containing
+            length one coordinates and the dimension.
+
+        """
+        length_one_coords = []
+        for coord in cube.coords():
+            if len(coord.points) == 1:  # Find length one coordinates.
+                if add_dimension:
+                    length_one_coords.append((coord, 0))
+                else:
+                    length_one_coords.append(coord)
+        return length_one_coords
+
+    def _separate_length_one_coords_into_aux_and_dim(
+            self, length_one_coords, dim_coords=["time"]):
+        """
+        Function to separate coordinates into auxiliary and dimension
+        coordinates.
+
+        Parameters
+        ----------
+        length_one_coords : Iterable of coordinates
+            The coordinates to be checked for length one coordinates.
+        dim_coords : List of coordinates
+            The length one coordinates to be made dimension coordinates.
+
+        Returns
+        -------
+        length_one_coords_for_aux_coords
+            List of length one coordinates to be auxiliary coordinates, i.e.
+            not in the dim_coords list.
+        length_one_coords_for_dim_coords
+            List of length one coordinates to be dimension coordinates,
+            according to dim_coords list.
+
+        """
+        length_one_coords_for_aux_coords = []
+        length_one_coords_for_dim_coords = []
+        for coord in length_one_coords:
+            if coord[0].name() in dim_coords:
+                length_one_coords_for_dim_coords.append(coord)
+            else:
+                length_one_coords_for_aux_coords.append(coord)
+        return (
+            length_one_coords_for_aux_coords,
+            length_one_coords_for_dim_coords)
+
+    def _create_coefficient_cube(
+            self, cube, optimised_coeffs_at_date, coeff_names):
+        """
+        Function to create cubes to store the coefficients used in the
+        ensemble calibration.
+
+        Parameters
+        ----------
+        cube : Iris cube
+            Cube providing the attributes and the length one coordinates
+            for the coefficient cubes.
+        optimised_coeffs_at_date : List of coefficients
+            Optimised coefficients for a particular date.
+        coeff_names : List
+            List of coefficient names.
+
+        Returns
+        -------
+        coeff_cubes : Iris CubeList
+            CubeList containing one cube per coefficient, with the
+            coefficient value as the data array.
+ + """ + length_one_coords = self._find_coords_of_length_one(cube) + + length_one_coords_for_aux_coords, length_one_coords_for_dim_coords = ( + self._separate_length_one_coords_into_aux_and_dim( + length_one_coords)) + + coeff_cubes = iris.cube.CubeList([]) + for coeff, coeff_name in zip(optimised_coeffs_at_date, coeff_names): + cube = iris.cube.Cube( + [coeff], long_name=coeff_name, attributes=cube.attributes, + aux_coords_and_dims=length_one_coords_for_aux_coords, + dim_coords_and_dims=length_one_coords_for_dim_coords) + coeff_cubes.append(cube) + return coeff_cubes + +
[docs] def apply_params_entry(self): + """ + Wrapping function to calculate the forecast predictor and forecast + variance prior to applying coefficients to the current forecast. + + Returns + ------- + calibrated_forecast_predictor : CubeList + CubeList containing both the calibrated version of the ensemble + predictor, either the ensemble mean/members. + calibrated_forecast_variance : CubeList + CubeList containing both the calibrated version of the ensemble + variance, either the ensemble mean/members. + calibrated_forecast_coefficients : CubeList + CubeList containing both the coefficients for calibrating + the ensemble. + + """ + # Ensure predictor_of_mean_flag is valid. + check_predictor_of_mean_flag(self.predictor_of_mean_flag) + + rename_coordinate( + self.current_forecast, "ensemble_member_id", "realization") + + current_forecast_cubes = concatenate_cubes( + self.current_forecast) + + if self.predictor_of_mean_flag.lower() in ["mean"]: + forecast_predictors = current_forecast_cubes.collapsed( + "realization", iris.analysis.MEAN) + elif self.predictor_of_mean_flag.lower() in ["members"]: + forecast_predictors = current_forecast_cubes + + forecast_vars = current_forecast_cubes.collapsed( + "realization", iris.analysis.VARIANCE) + + (calibrated_forecast_predictor, calibrated_forecast_var, + calibrated_forecast_coefficients) = self._apply_params( + forecast_predictors, forecast_vars, self.optimised_coeffs, + self.coeff_names, self.predictor_of_mean_flag) + return (calibrated_forecast_predictor, + calibrated_forecast_var, + calibrated_forecast_coefficients)
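The predictor and variance extraction above uses iris's collapse over the realization coordinate. Under the assumption that `forecast` is a cube with a "realization" dimension (an illustrative variable, not defined here), the operation looks like:

    import iris
    # Collapse the realization dimension to ensemble statistics.
    ensemble_mean = forecast.collapsed("realization", iris.analysis.MEAN)
    ensemble_variance = forecast.collapsed(
        "realization", iris.analysis.VARIANCE)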
+ + def _apply_params( + self, forecast_predictors, forecast_vars, optimised_coeffs, + coeff_names, predictor_of_mean_flag): + """ + Function to apply EMOS coefficients to all required dates. + + Parameters + ---------- + forecast_predictors : Iris cube + Cube containing the forecast predictor e.g. ensemble mean + or ensemble members. + forecast_vars : Iris cube. + Cube containing the forecast variance e.g. ensemble variance. + optimised_coeffs : List + Coefficients for all dates. + coeff_names : List + Coefficient names. + predictor_of_mean_flag : String + String to specify the input to calculate the calibrated mean. + Currently the ensemble mean ("mean") and the ensemble members + ("members") are supported as the predictors. + + Returns + ------- + calibrated_forecast_predictor_all_dates : CubeList + List of cubes containing the calibrated forecast predictor. + calibrated_forecast_var_all_dates : CubeList + List of cubes containing the calibrated forecast variance. + calibrated_forecast_coefficients_all_dates : CubeList + List of cubes containing the coefficients used for calibration. + + """ + calibrated_forecast_predictor_all_dates = iris.cube.CubeList() + calibrated_forecast_var_all_dates = iris.cube.CubeList() + calibrated_forecast_coefficients_all_dates = iris.cube.CubeList() + + for forecast_predictor, forecast_var in zip( + forecast_predictors.slices_over("time"), + forecast_vars.slices_over("time")): + + date = unit.num2date( + forecast_predictor.coord("time").points, + forecast_predictor.coord("time").units.name, + forecast_predictor.coord("time").units.calendar)[0] + + with iris.FUTURE.context(cell_datetime_objects=True): + constr = iris.Constraint(time=date) + forecast_predictor_at_date = forecast_predictor.extract(constr) + forecast_var_at_date = forecast_var.extract(constr) + + # If the coefficients are not available for the date, use the + # raw ensemble forecast as the calibrated ensemble forecast. + if date not in optimised_coeffs.keys(): + msg = ("Ensemble calibration not available " + "for forecasts with start time of {}. " + "Coefficients not available".format( + date.strftime("%Y%m%d%H%M"))) + warnings.warn(msg) + calibrated_forecast_predictor_at_date = ( + forecast_predictor_at_date.copy()) + calibrated_forecast_var_at_date = forecast_var_at_date.copy() + optimised_coeffs[date] = np.full(len(coeff_names), np.nan) + coeff_cubes = self._create_coefficient_cube( + forecast_predictor_at_date, optimised_coeffs, coeff_names) + else: + optimised_coeffs_at_date = ( + optimised_coeffs[date]) + + # Assigning coefficients to coefficient names. + if len(optimised_coeffs_at_date) == len(coeff_names): + optimised_coeffs_at_date = dict( + zip(coeff_names, optimised_coeffs_at_date)) + elif len(optimised_coeffs_at_date) > len(coeff_names): + excess_beta = ( + optimised_coeffs_at_date[len(coeff_names):].tolist()) + optimised_coeffs_at_date = ( + dict(zip(coeff_names, optimised_coeffs_at_date))) + optimised_coeffs_at_date["beta"] = np.array( + [optimised_coeffs_at_date["beta"]]+excess_beta) + else: + msg = ("Number of coefficient names {} with names {} " + "is not equal to the number of " + "optimised_coeffs_at_date values {} " + "with values {} or the number of " + "coefficients is not greater than the " + "number of coefficient names. 
Cannot continue "
+                           "if the coefficient names outnumber the "
+                           "coefficients.".format(
+                               len(coeff_names), coeff_names,
+                               len(optimised_coeffs_at_date),
+                               optimised_coeffs_at_date))
+                    raise ValueError(msg)
+
+                if predictor_of_mean_flag.lower() in ["mean"]:
+                    # Calculate predicted mean = a + b*X, where X is the
+                    # raw ensemble mean. In this case, b = beta.
+                    beta = [optimised_coeffs_at_date["a"],
+                            optimised_coeffs_at_date["beta"]]
+                    forecast_predictor_flat = (
+                        forecast_predictor_at_date.data.flatten())
+                    new_col = np.ones(forecast_predictor_flat.shape)
+                    all_data = np.column_stack(
+                        (new_col, forecast_predictor_flat))
+                    predicted_mean = np.dot(all_data, beta)
+                    calibrated_forecast_predictor_at_date = (
+                        forecast_predictor_at_date)
+                elif predictor_of_mean_flag.lower() in ["members"]:
+                    # Calculate predicted mean = a + b*X, where X contains
+                    # the raw ensemble members. In this case, b = beta^2.
+                    beta = np.concatenate(
+                        [[optimised_coeffs_at_date["a"]],
+                         optimised_coeffs_at_date["beta"]**2])
+                    forecast_predictor_flat = (
+                        convert_cube_data_to_2d(
+                            forecast_predictor_at_date))
+                    forecast_var_flat = forecast_var_at_date.data.flatten()
+
+                    new_col = np.ones(forecast_var_flat.shape)
+                    all_data = (
+                        np.column_stack((new_col, forecast_predictor_flat)))
+                    predicted_mean = np.dot(all_data, beta)
+                    # Calculate mean of ensemble members, as only the
+                    # calibrated ensemble mean will be returned.
+                    calibrated_forecast_predictor_at_date = (
+                        forecast_predictor_at_date.collapsed(
+                            "realization", iris.analysis.MEAN))
+
+                xlen = len(forecast_predictor_at_date.coord(axis="x").points)
+                ylen = len(forecast_predictor_at_date.coord(axis="y").points)
+                predicted_mean = np.reshape(predicted_mean, (ylen, xlen))
+                calibrated_forecast_predictor_at_date.data = predicted_mean
+
+                # Calculate the predicted variance, based on the
+                # raw variance S^2, where predicted variance = c + dS^2,
+                # where c = (gamma)^2 and d = (delta)^2.
+                predicted_var = (optimised_coeffs_at_date["gamma"]**2 +
+                                 optimised_coeffs_at_date["delta"]**2 *
+                                 forecast_var_at_date.data)
+
+                calibrated_forecast_var_at_date = forecast_var_at_date
+                calibrated_forecast_var_at_date.data = predicted_var
+
+                coeff_cubes = self._create_coefficient_cube(
+                    calibrated_forecast_predictor_at_date,
+                    optimised_coeffs[date], coeff_names)
+
+            calibrated_forecast_predictor_all_dates.append(
+                calibrated_forecast_predictor_at_date)
+            calibrated_forecast_var_all_dates.append(
+                calibrated_forecast_var_at_date)
+            calibrated_forecast_coefficients_all_dates.extend(coeff_cubes)
+
+        return (calibrated_forecast_predictor_all_dates,
+                calibrated_forecast_var_all_dates,
+                calibrated_forecast_coefficients_all_dates)
+ + +
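Summarising the EMOS relations applied in _apply_params: with raw predictor X (the ensemble mean, or the vector of individual members) and raw ensemble variance S^2, the calibrated moments are

    \hat{\mu} = a + \mathbf{b}^{\top}\mathbf{X}, \qquad
    \hat{\sigma}^{2} = c + d\,S^{2}, \qquad
    c = \gamma^{2},\; d = \delta^{2},

where b = beta for the mean predictor and, for the members predictor, each component is squared, b_i = beta_i^2, to keep the member weights non-negative.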
+class EnsembleCalibration(object):
+    """
+    Plugin to wrap the core EMOS processes:
+    1. Estimate optimised EMOS coefficients from training period.
+    2. Apply optimised EMOS coefficients for future dates.
+
+    """
+    def __init__(self, calibration_method, distribution, desired_units,
+                 predictor_of_mean_flag="mean"):
+        """
+        Create an ensemble calibration plugin that, for Nonhomogeneous
+        Gaussian Regression, calculates coefficients based on historical
+        forecasts and applies the coefficients to the current forecast.
+
+        Parameters
+        ----------
+        calibration_method : String
+            The calibration method that will be applied.
+            Supported methods are:
+                ensemble model output statistics
+                nonhomogeneous gaussian regression
+            Currently these methods are not supported:
+                logistic regression
+                bayesian model averaging
+        distribution : String
+            The distribution that will be used for calibration. This will be
+            dependent upon the input phenomenon. This has to be supported by
+            the minimisation functions in
+            ContinuousRankedProbabilityScoreMinimisers.
+        desired_units : String or cf_units.Unit
+            The unit that you would like the calibration to be undertaken
+            in. The current forecast, historical forecast and truth will be
+            converted as required.
+        predictor_of_mean_flag : String
+            String to specify the input to calculate the calibrated mean.
+            Currently the ensemble mean ("mean") and the ensemble members
+            ("members") are supported as the predictors.
+        """
+        self.calibration_method = calibration_method
+        self.distribution = distribution
+        self.desired_units = desired_units
+        self.predictor_of_mean_flag = predictor_of_mean_flag
+
+    def __str__(self):
+        result = ('<EnsembleCalibration: '
+                  'calibration_method: {}; '
+                  'distribution: {}; '
+                  'desired_units: {}; '
+                  'predictor_of_mean_flag: {}>')
+        return result.format(
+            self.calibration_method, self.distribution, self.desired_units,
+            self.predictor_of_mean_flag)
+
[docs]    def process(self, current_forecast, historic_forecast, truth):
+        """
+        Performs ensemble calibration through the following steps:
+        1. Estimate optimised coefficients from the training period.
+        2. Apply optimised coefficients to the current forecast.
+
+        Parameters
+        ----------
+        current_forecast : Iris Cube or CubeList
+            The Cube or CubeList that provides the input forecast for
+            the current cycle.
+        historic_forecast : Iris Cube or CubeList
+            The Cube or CubeList that provides the input historic forecasts
+            for calibration.
+        truth : Iris Cube or CubeList
+            The Cube or CubeList that provides the input truth for
+            calibration with dates matching the historic forecasts.
+
+        Returns
+        -------
+        * calibrated_forecast_predictor_and_variance : CubeList
+            CubeList containing the calibrated forecast predictor and
+            calibrated forecast variance.
+
+        """
+        def format_calibration_method(calibration_method):
+            """Lowercase input string, and replace underscores with spaces."""
+            return calibration_method.lower().replace("_", " ")
+
+        # Ensure predictor_of_mean_flag is valid.
+        check_predictor_of_mean_flag(self.predictor_of_mean_flag)
+
+        if (format_calibration_method(self.calibration_method) in
+                ["ensemble model output statistics",
+                 "nonhomogeneous gaussian regression"]):
+            if (format_calibration_method(self.distribution) in
+                    ["gaussian", "truncated gaussian"]):
+                ec = EstimateCoefficientsForEnsembleCalibration(
+                    self.distribution, self.desired_units,
+                    predictor_of_mean_flag=self.predictor_of_mean_flag)
+                optimised_coeffs, coeff_names = (
+                    ec.estimate_coefficients_for_ngr(
+                        current_forecast, historic_forecast, truth))
+            else:
+                msg = ("The {} distribution is not supported for "
+                       "calibration. Supported distributions are "
+                       "'gaussian' and 'truncated gaussian'.".format(
+                           format_calibration_method(self.distribution)))
+                raise ValueError(msg)
+        else:
+            msg = ("The {} calibration method is not supported. "
+                   "Supported methods are 'ensemble model output "
+                   "statistics' and 'nonhomogeneous gaussian "
+                   "regression'.".format(
+                       format_calibration_method(self.calibration_method)))
+            raise ValueError(msg)
+        ac = ApplyCoefficientsFromEnsembleCalibration(
+            current_forecast, optimised_coeffs, coeff_names,
+            predictor_of_mean_flag=self.predictor_of_mean_flag)
+        (calibrated_forecast_predictor, calibrated_forecast_variance,
+         calibrated_forecast_coefficients) = ac.apply_params_entry()
+        calibrated_forecast_predictor_and_variance = iris.cube.CubeList([
+            calibrated_forecast_predictor, calibrated_forecast_variance])
+        return calibrated_forecast_predictor_and_variance
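A hedged usage sketch of the plugin above; the three cube variables are assumed to be iris cubes prepared elsewhere with the coordinates the plugin expects, and "gaussian" is just an illustrative choice of distribution:

# Sketch only: current_forecast, historic_forecast and truth are assumed
# to be iris cubes loaded elsewhere.
plugin = EnsembleCalibration(
    calibration_method="ensemble model output statistics",
    distribution="gaussian",
    desired_units="Celsius")
calibrated_mean, calibrated_variance = plugin.process(
    current_forecast, historic_forecast, truth)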
+ + +
[docs]class GeneratePercentilesFromMeanAndVariance(object):
+    """
+    Plugin focussing on generating percentiles from mean and variance.
+    In combination with the EnsembleReordering plugin, this is Ensemble
+    Copula Coupling.
+    """
+
+    def __init__(self):
+        """Initialise the class."""
+        pass
+
+    def _create_cube_with_percentiles(
+            self, percentiles, template_cube, cube_data):
+        """
+        Create a cube with a percentile coordinate based on a template cube.
+
+        Parameters
+        ----------
+        percentiles : List
+            Ensemble percentiles.
+        template_cube : Iris cube
+            Cube to copy majority of coordinate definitions from.
+        cube_data : Numpy array
+            Data to insert into the template cube.
+            The data is expected to have the shape of
+            percentiles (0th dimension), time (1st dimension),
+            y_coord (2nd dimension), x_coord (3rd dimension).
+
+        Returns
+        -------
+        Iris cube
+            Cube containing the percentile coordinate and the supplied data.
+
+        """
+        percentile_coord = iris.coords.DimCoord(
+            np.float32(percentiles), long_name="percentile",
+            units=unit.Unit("1"), var_name="percentile")
+
+        time_coord = template_cube.coord("time")
+        y_coord = template_cube.coord(axis="y")
+        x_coord = template_cube.coord(axis="x")
+
+        dim_coords_and_dims = [
+            (percentile_coord, 0), (time_coord, 1),
+            (y_coord, 2), (x_coord, 3)]
+
+        frt_coord = template_cube.coord("forecast_reference_time")
+        fp_coord = template_cube.coord("forecast_period")
+        aux_coords_and_dims = [(frt_coord, 1), (fp_coord, 1)]
+
+        metadata_dict = copy.deepcopy(template_cube.metadata._asdict())
+
+        cube = iris.cube.Cube(
+            cube_data, dim_coords_and_dims=dim_coords_and_dims,
+            aux_coords_and_dims=aux_coords_and_dims, **metadata_dict)
+        cube.attributes = template_cube.attributes
+        cube.cell_methods = template_cube.cell_methods
+        return cube
+
+    def _mean_and_variance_to_percentiles(
+            self, calibrated_forecast_predictor, calibrated_forecast_variance,
+            percentiles):
+        """
+        Function returning percentiles based on the supplied
+        mean and variance. The percentiles are created by assuming a
+        Gaussian distribution and calculating the value of the phenomenon at
+        specific points within the distribution.
+
+        Parameters
+        ----------
+        calibrated_forecast_predictor : cube
+            Predictor for the calibrated forecast i.e. the mean.
+        calibrated_forecast_variance : cube
+            Variance for the calibrated forecast.
+        percentiles : List
+            Percentiles at which to calculate the value of the phenomenon.
+
+        Returns
+        -------
+        percentile_cube : Iris cube
+            Cube containing the values for the phenomenon at each of the
+            percentiles requested.
+
+        """
+        if not calibrated_forecast_predictor.coord_dims("time"):
+            calibrated_forecast_predictor = iris.util.new_axis(
+                calibrated_forecast_predictor, "time")
+        if not calibrated_forecast_variance.coord_dims("time"):
+            calibrated_forecast_variance = iris.util.new_axis(
+                calibrated_forecast_variance, "time")
+
+        calibrated_forecast_predictor_data = (
+            calibrated_forecast_predictor.data.flatten())
+        calibrated_forecast_variance_data = (
+            calibrated_forecast_variance.data.flatten())
+
+        result = np.zeros((calibrated_forecast_predictor_data.shape[0],
+                           len(percentiles)))
+
+        # Loop over percentiles, and use a normal distribution with the mean
+        # and variance to calculate the values at each percentile.
+        for index, percentile in enumerate(percentiles):
+            percentile_list = np.repeat(
+                percentile, len(calibrated_forecast_predictor_data))
+            result[:, index] = norm.ppf(
+                percentile_list, loc=calibrated_forecast_predictor_data,
+                scale=np.sqrt(calibrated_forecast_variance_data))
+            # If the percent point function (PPF) returns NaNs, fill in
+            # the mean instead of the NaN values. NaN will only be generated
+            # if the variance is zero. Therefore, if the variance is zero,
+            # the mean value is used for all gridpoints with a NaN.
+            if np.any(calibrated_forecast_variance_data == 0):
+                nan_index = np.argwhere(np.isnan(result[:, index]))
+                result[nan_index, index] = (
+                    calibrated_forecast_predictor_data[nan_index])
+            if np.any(np.isnan(result)):
+                msg = ("NaNs are present within the result for the {} "
+                       "percentile. Unable to calculate the percent point "
+                       "function.".format(percentile))
+                raise ValueError(msg)
+
+        result = result.T
+
+        t_coord = calibrated_forecast_predictor.coord("time")
+        y_coord = calibrated_forecast_predictor.coord(axis="y")
+        x_coord = calibrated_forecast_predictor.coord(axis="x")
+
+        result = result.reshape(
+            len(percentiles), len(t_coord.points), len(y_coord.points),
+            len(x_coord.points))
+        percentile_cube = self._create_cube_with_percentiles(
+            percentiles, calibrated_forecast_predictor, result)
+
+        percentile_cube.cell_methods = {}
+        return percentile_cube
+
+    def _create_percentiles(
+            self, no_of_percentiles, sampling="quantile"):
+        """
+        Function to create percentiles.
+
+        Parameters
+        ----------
+        no_of_percentiles : Int
+            Number of percentiles.
+        sampling : String
+            Type of sampling of the distribution to produce a set of
+            percentiles e.g. quantile or random.
+            Accepted options for sampling are:
+            Quantile: A regular set of equally-spaced percentiles aimed
+                      at dividing a Cumulative Distribution Function into
+                      blocks of equal probability.
+            Random: A random set of ordered percentiles.
+
+        For further details, see Flowerdew, J., 2014.
+        Calibrating ensemble reliability whilst preserving spatial structure.
+        Tellus, Series A: Dynamic Meteorology and Oceanography, 66(1), pp.1-20.
+        Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013.
+        Uncertainty Quantification in Complex Simulation Models Using Ensemble
+        Copula Coupling.
+        Statistical Science, 28(4), pp.616-640.
+
+        Returns
+        -------
+        percentiles : List
+            Percentiles calculated using the sampling technique specified.
+
+        """
+        if sampling in ["quantile"]:
+            percentiles = np.linspace(
+                1/float(1+no_of_percentiles),
+                no_of_percentiles/float(1+no_of_percentiles),
+                no_of_percentiles).tolist()
+        elif sampling in ["random"]:
+            percentiles = []
+            for _ in range(no_of_percentiles):
+                percentiles.append(
+                    random.uniform(
+                        1/float(1+no_of_percentiles),
+                        no_of_percentiles/float(1+no_of_percentiles)))
+            percentiles = sorted(percentiles)
+        else:
+            msg = "The {} sampling option is not yet implemented.".format(
+                sampling)
+            raise ValueError(msg)
+        return percentiles
+
[docs] def process(self, calibrated_forecast_predictor_and_variance, + raw_forecast): + """ + Generate ensemble percentiles from the mean and variance. + + Parameters + ---------- + calibrated_forecast_predictor_and_variance : Iris CubeList + CubeList containing the calibrated forecast predictor and + calibrated forecast variance. + raw_forecast : Iris Cube or CubeList + Cube or CubeList that is expected to be the raw + (uncalibrated) forecast. + Returns + ------- + calibrated_forecast_percentiles : Iris cube + Cube for calibrated percentiles. + + """ + (calibrated_forecast_predictor, calibrated_forecast_variance) = ( + calibrated_forecast_predictor_and_variance) + + calibrated_forecast_predictor = concatenate_cubes( + calibrated_forecast_predictor) + calibrated_forecast_variance = concatenate_cubes( + calibrated_forecast_variance) + rename_coordinate( + raw_forecast, "ensemble_member_id", "realization") + raw_forecast_members = concatenate_cubes(raw_forecast) + + no_of_percentiles = len( + raw_forecast_members.coord("realization").points) + + percentiles = self._create_percentiles(no_of_percentiles) + calibrated_forecast_percentiles = ( + self._mean_and_variance_to_percentiles( + calibrated_forecast_predictor, + calibrated_forecast_variance, + percentiles)) + + return calibrated_forecast_percentiles
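Detached from iris, the core of the percentile generation above is a call to the Gaussian percent point function at equally-spaced quantiles. A minimal sketch for a single grid point, with hypothetical mean and variance values:

import numpy as np
from scipy.stats import norm

# Hypothetical calibrated mean/variance for one grid point.
mean, variance = 1.5, 0.25
no_of_percentiles = 3
percentiles = np.linspace(
    1.0 / (1 + no_of_percentiles),
    no_of_percentiles / float(1 + no_of_percentiles),
    no_of_percentiles)                                    # [0.25 0.5 0.75]
values = norm.ppf(percentiles, loc=mean, scale=np.sqrt(variance))
print(values)                                             # approx [1.16 1.5 1.84]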
+ + +
[docs]class EnsembleReordering(object): + """ + Plugin for applying the reordering step of Ensemble Copula Coupling, + in order to generate ensemble members from percentiles. + The percentiles are assumed to be in ascending order. + + Reference: + Schefzik, R., Thorarinsdottir, T.L. & Gneiting, T., 2013. + Uncertainty Quantification in Complex Simulation Models Using Ensemble + Copula Coupling. + Statistical Science, 28(4), pp.616-640. + + """ + def __init__(self): + """Initialise the class.""" + pass + +
[docs]    def rank_ecc(self, calibrated_forecast_percentiles, raw_forecast_members):
+        """
+        Function to apply Ensemble Copula Coupling. This ranks the calibrated
+        forecast members based on a ranking determined from the raw forecast
+        members.
+
+        Parameters
+        ----------
+        calibrated_forecast_percentiles : cube
+            Cube for calibrated percentiles. The percentiles are assumed to be
+            in ascending order.
+        raw_forecast_members : cube
+            Cube containing the raw (uncalibrated) forecasts.
+
+        Returns
+        -------
+        Iris cube
+            Cube for calibrated members where at a particular grid point,
+            the ranking of the values within the ensemble matches the ranking
+            from the raw ensemble.
+
+        """
+        results = iris.cube.CubeList([])
+        for rawfc, calfc in zip(
+                raw_forecast_members.slices_over("time"),
+                calibrated_forecast_percentiles.slices_over("time")):
+            random_data = np.random.random(rawfc.data.shape)
+            # Lexsort returns the indices sorted firstly by the primary key,
+            # the raw forecast data, and secondly by the secondary key, an
+            # array of random data, in order to split tied values randomly.
+            sorting_index = np.lexsort((random_data, rawfc.data), axis=0)
+            # Returns the indices that would sort the array.
+            ranking = np.argsort(sorting_index, axis=0)
+            # Index the calibrated forecast data using the ranking array.
+            # np.choose allows indexing of a 3d array using a 3d array of
+            # indices.
+            calfc.data = np.choose(ranking, calfc.data)
+            results.append(calfc)
+        return concatenate_cubes(results)
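The reordering step can be demonstrated on plain arrays: the ascending calibrated values inherit the rank order of the raw ensemble at each grid point. A small sketch with made-up data, shapes (n_members, n_points):

import numpy as np

np.random.seed(0)
raw = np.array([[2.0, 9.0],
                [5.0, 1.0],
                [1.0, 4.0]])           # raw ensemble
cal = np.array([[1.1, 1.2],
                [2.1, 2.2],
                [3.1, 3.2]])           # calibrated percentiles, ascending

random_data = np.random.random(raw.shape)           # tie-breaking key
sorting_index = np.lexsort((random_data, raw), axis=0)
ranking = np.argsort(sorting_index, axis=0)
reordered = np.choose(ranking, cal)
print(reordered)    # column 0: raw values (2, 5, 1) -> (2.1, 3.1, 1.1)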
+ +
[docs]    def process(self, calibrated_forecast, raw_forecast):
+        """
+        Reorder the calibrated forecast percentiles into ensemble members,
+        using the ordering of the raw ensemble.
+
+        Parameters
+        ----------
+        calibrated_forecast : Iris Cube or CubeList
+            The cube or cubelist containing the calibrated forecast members.
+        raw_forecast : Iris Cube or CubeList
+            The cube or cubelist containing the raw (uncalibrated) forecast.
+
+        Returns
+        -------
+        calibrated_forecast_members : cube
+            Cube containing the new ensemble members, where the ranking of
+            the calibrated values at each grid point matches the ranking
+            within the raw ensemble.
+        """
+        rename_coordinate(
+            raw_forecast, "ensemble_member_id", "realization")
+        calibrated_forecast_percentiles = concatenate_cubes(
+            calibrated_forecast,
+            coords_to_slice_over=["percentile", "time"])
+        raw_forecast_members = concatenate_cubes(raw_forecast)
+        calibrated_forecast_members = self.rank_ecc(
+            calibrated_forecast_percentiles, raw_forecast_members)
+        rename_coordinate(
+            calibrated_forecast_members, "percentile", "realization")
+        return calibrated_forecast_members
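The three plugins chain naturally; a hedged end-to-end sketch, where all cube variables are assumed to exist and to carry the coordinates (realization or ensemble_member_id, time) that the plugins expect:

# Sketch only: cubes are assumed to be loaded elsewhere.
predictor_and_variance = EnsembleCalibration(
    "ensemble model output statistics", "gaussian", "Celsius").process(
        current_forecast, historic_forecast, truth)
percentile_cube = GeneratePercentilesFromMeanAndVariance().process(
    predictor_and_variance, raw_forecast)
new_members = EnsembleReordering().process(percentile_cube, raw_forecast)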
+
+ +
+
+
+ +
+
+ + + + \ No newline at end of file diff --git a/doc/build/html/_modules/improver/ensemble_calibration/ensemble_calibration_utilities.html b/doc/build/html/_modules/improver/ensemble_calibration/ensemble_calibration_utilities.html new file mode 100644 index 0000000000..12ce21a086 --- /dev/null +++ b/doc/build/html/_modules/improver/ensemble_calibration/ensemble_calibration_utilities.html @@ -0,0 +1,397 @@ + + + + + + + + improver.ensemble_calibration.ensemble_calibration_utilities — Improver documentation + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for improver.ensemble_calibration.ensemble_calibration_utilities

+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""
+This module defines all the utilities used by the "plugins"
+specific for ensemble calibration.
+
+"""
+import numpy as np
+
+import iris
+
+
+
[docs]def convert_cube_data_to_2d(
+        forecast, coord="realization", transpose=True):
+    """
+    Function to convert data from an N-dimensional cube into a 2d
+    numpy array. The result can be transposed, if required.
+
+    Parameters
+    ----------
+    forecast : Iris cube
+        N-dimensional cube to be reshaped.
+    coord : String
+        The data will be flattened along this coordinate.
+    transpose : Logical
+        If True, the resulting flattened data is transposed.
+        If False, the resulting flattened data is not transposed.
+
+    Returns
+    -------
+    forecast_data : Numpy array
+        Reshaped 2d array.
+
+    """
+    forecast_data = []
+    for coord_slice in forecast.slices_over(coord):
+        forecast_data.append(coord_slice.data.flatten())
+    if transpose:
+        forecast_data = np.asarray(forecast_data).T
+    return np.array(forecast_data)
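The reshaping is easiest to see on a bare array: a (realization, y, x) block becomes (y*x, realization), one column per ensemble member and one row per grid point. A sketch with made-up dimensions:

import numpy as np

data = np.arange(2 * 3 * 4).reshape(2, 3, 4)   # 2 members on a 3x4 grid
flattened = np.asarray([member.flatten() for member in data]).T
print(flattened.shape)                         # (12, 2)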
+ + +
[docs]def concatenate_cubes(
+        cubes, coords_to_slice_over=["realization", "time"],
+        master_coord="time",
+        coordinates_for_association=["forecast_reference_time",
+                                     "forecast_period"]):
+    """
+    Function to concatenate cubes, accounting for differences in the
+    history attribute, and allowing promotion of forecast_reference_time
+    and forecast_period coordinates from scalar coordinates to auxiliary
+    coordinates so that the cubes can be concatenated.
+
+    Parameters
+    ----------
+    cubes : Iris cubelist or Iris cube
+        Cubes to be concatenated.
+    coords_to_slice_over : List
+        Coordinates to be sliced over.
+    master_coord : String
+        Coordinate that the other coordinates will be associated with.
+    coordinates_for_association : List
+        List of coordinates to be associated with the master_coord.
+
+    Returns
+    -------
+    Iris cube
+        Concatenated cube.
+
+    """
+    if isinstance(cubes, iris.cube.Cube):
+        cubes = iris.cube.CubeList([cubes])
+
+    for coord_to_slice_over in coords_to_slice_over:
+        cubes = _slice_over_coordinate(cubes, coord_to_slice_over)
+
+    cubes = _strip_var_names(cubes)
+
+    associated_with_time_cubelist = iris.cube.CubeList([])
+    for cube in cubes:
+        associated_with_time_cubelist.append(
+            _associate_any_coordinate_with_master_coordinate(
+                cube, master_coord=master_coord,
+                coordinates=coordinates_for_association))
+    return associated_with_time_cubelist.concatenate_cube()
+
+
+def _associate_any_coordinate_with_master_coordinate(
+        cube, master_coord="time", coordinates=None):
+    """
+    Function to convert the given coordinates from scalar coordinates to
+    auxiliary coordinates, where these auxiliary coordinates will be
+    associated with the master coordinate.
+
+    For example, forecast_reference_time and forecast_period can be converted
+    from scalar coordinates to auxiliary coordinates, and associated with
+    time.
+
+    Parameters
+    ----------
+    cube : Iris cube
+        Cube requiring addition of the specified coordinates as auxiliary
+        coordinates.
+    master_coord : String
+        Coordinate that the other coordinates will be associated with.
+    coordinates : None or List
+        List of coordinates to be associated with the master_coord.
+
+    Returns
+    -------
+    cube : Iris cube
+        Cube where the requested coordinates have been added to the cube
+        as auxiliary coordinates and associated with the desired master
+        coordinate.
+
+    """
+    if coordinates is None:
+        coordinates = []
+    for coord in coordinates:
+        if cube.coords(coord):
+            if cube.coords(master_coord):
+                temp_coord = cube.coord(coord)
+                cube.remove_coord(coord)
+                temp_aux_coord = iris.coords.AuxCoord(
+                    temp_coord.points,
+                    standard_name=temp_coord.standard_name,
+                    long_name=temp_coord.long_name,
+                    var_name=temp_coord.var_name, units=temp_coord.units,
+                    bounds=temp_coord.bounds,
+                    attributes=temp_coord.attributes,
+                    coord_system=temp_coord.coord_system)
+                coord_names = [
+                    coord.standard_name for coord in cube.dim_coords]
+                cube.add_aux_coord(
+                    temp_aux_coord,
+                    data_dims=coord_names.index(master_coord))
+            else:
+                msg = (
+                    "The master coordinate for associating other " +
+                    "coordinates with is not present: " +
+                    "master_coord: {}, other coordinates: {}".format(
+                        master_coord, coordinates))
+                raise ValueError(msg)
+    return cube
+
+
+def _slice_over_coordinate(cubes, coord_to_slice_over, remove_history=True):
+    """
+    Function to slice over the requested coordinate,
+    promote the sliced coordinate into a dimension coordinate and
+    remove the history attribute to help concatenation.
+
+    Parameters
+    ----------
+    cubes : Iris cubelist or Iris cube
+        Cubes to be concatenated.
+    coord_to_slice_over : String
+        Coordinate to be sliced over.
+    remove_history : Logical
+        Option to remove the history attribute to help make concatenation
+        more likely. remove_history is set to True by default.
+
+    Returns
+    -------
+    Iris CubeList
+        CubeList containing sliced cubes.
+
+    """
+    sliced_by_coord_cubelist = iris.cube.CubeList([])
+    if isinstance(cubes, iris.cube.Cube):
+        cubes = iris.cube.CubeList([cubes])
+    for cube in cubes:
+        if cube.coords(coord_to_slice_over):
+            for coord_slice in cube.slices_over(coord_to_slice_over):
+                coord_slice = iris.util.new_axis(
+                    coord_slice, coord_to_slice_over)
+                if (remove_history and
+                        "history" in coord_slice.attributes.keys()):
+                    coord_slice.attributes.pop("history")
+                sliced_by_coord_cubelist.append(coord_slice)
+        else:
+            sliced_by_coord_cubelist.append(cube)
+    return sliced_by_coord_cubelist
+
+
+def _strip_var_names(cubes):
+    """
+    Strips var_name from the cube and from all coordinates
+    to help concatenation.
+
+    Parameters
+    ----------
+    cubes : Iris cubelist or Iris cube
+        Cubes to be concatenated.
+
+    Returns
+    -------
+    Iris CubeList
+        CubeList containing original cubes without a var_name on the cube,
+        or on the coordinates.
+ + """ + if isinstance(cubes, iris.cube.Cube): + cubes = iris.cube.CubeList([cubes]) + for cube in cubes: + cube.var_name = None + for coord in cube.coords(): + coord.var_name = None + return cubes + + +
[docs]def rename_coordinate(cubes, original_coord, renamed_coord): + """ + Renames a coordinate to an alternative name for an + input Iris Cube or Iris CubeList. + + Parameters + ---------- + cubes : Iris cubelist or Iris cube + Cubes with coordinates to be renamed. + original_coord : String + Original name for the coordinate. + renamed_coord : String + Name for the coordinate to be renamed to. + + """ + if isinstance(cubes, iris.cube.Cube): + _renamer(cubes, original_coord, renamed_coord) + elif isinstance(cubes, iris.cube.CubeList): + for cube in cubes: + _renamer(cube, original_coord, renamed_coord) + else: + msg = ("A Cube or CubeList is not provided for renaming " + "{} to {}. Variable provided " + "is of type: {}".format( + original_coord, renamed_coord, type(cubes))) + raise TypeError(msg)
+
+
+def _renamer(cube, original_coord, renamed_coord):
+    """
+    Renames a coordinate to an alternative name.
+    If the coordinate is not found within the cube, the cube is left
+    unchanged.
+
+    Parameters
+    ----------
+    cube : Iris cube
+        Cube with coordinates to be renamed.
+    original_coord : String
+        Original name for the coordinate.
+    renamed_coord : String
+        Name for the coordinate to be renamed to.
+
+    """
+    if cube.coords(original_coord):
+        cube.coord(original_coord).rename(renamed_coord)
+
+
[docs]def check_predictor_of_mean_flag(predictor_of_mean_flag):
+    """
+    Check the predictor_of_mean_flag at the start of the
+    estimate_coefficients_for_ngr method, to avoid having to check
+    and raise an error later.
+
+    Parameters
+    ----------
+    predictor_of_mean_flag : String
+        String to specify the input to calculate the calibrated mean.
+        Currently the ensemble mean ("mean") and the ensemble members
+        ("members") are supported as the predictors.
+
+    """
+    if predictor_of_mean_flag.lower() not in ["mean", "members"]:
+        msg = ("The requested value for the predictor_of_mean_flag {} "
+               "is not an accepted value. "
+               "Accepted values are 'mean' or 'members'.").format(
+                   predictor_of_mean_flag.lower())
+        raise ValueError(msg)
+
+ +
+
+
+ +
+
+ + + + \ No newline at end of file diff --git a/doc/build/html/_modules/improver/nbhood.html b/doc/build/html/_modules/improver/nbhood.html new file mode 100644 index 0000000000..21ca53f46c --- /dev/null +++ b/doc/build/html/_modules/improver/nbhood.html @@ -0,0 +1,252 @@ + + + + + + + + improver.nbhood — Improver documentation + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for improver.nbhood

+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Module containing neighbourhood processing utilities."""
+
+
+import iris
+import numpy as np
+import scipy.ndimage.filters
+
+
+
[docs]class BasicNeighbourhoodProcessing(object):
+    """
+    Apply a neighbourhood processing kernel to a thresholded cube.
+
+    When applied to a thresholded probabilistic cube, it acts like a
+    low-pass filter which reduces noisiness in the probabilities.
+
+    The kernel will presently only work with projections in which the
+    x grid point spacing and y grid point spacing are constant over the
+    entire domain, such as the UK national grid projection.
+
+    A maximum kernel radius of 500 grid cells is imposed in order to
+    avoid computational inefficiency and possible memory errors.
+
+    """
+
+    # Max extent of kernel in grid cells.
+    MAX_KERNEL_CELL_RADIUS = 500
+
+    def __init__(self, radius_in_km, unweighted_mode=False):
+        """
+        Create a neighbourhood processing plugin that applies a smoothing
+        kernel to points in a cube.
+
+        Parameters
+        ----------
+
+        radius_in_km : float
+            The radius in kilometres of the neighbourhood kernel to
+            apply. Rounded up to convert into integer number of grid
+            points east and north, based on the characteristic spacing
+            at the zero indices of the cube projection-x/y coords.
+
+        unweighted_mode : boolean
+            If True, use a circle with constant weighting.
+            If False, use a circle for neighbourhood kernel with
+            weighting decreasing with radius.
+
+        """
+        self.radius_in_km = float(radius_in_km)
+        self.unweighted_mode = bool(unweighted_mode)
+
+    def __str__(self):
+        result = ('<NeighbourhoodProcessing: radius_in_km: {}; ' +
+                  'unweighted_mode: {}>')
+        return result.format(
+            self.radius_in_km, self.unweighted_mode)
+
[docs] def get_grid_x_y_kernel_ranges(self, cube): + """Return grid cell numbers east and north for the kernel.""" + try: + x_coord = cube.coord("projection_x_coordinate").copy() + y_coord = cube.coord("projection_y_coordinate").copy() + except iris.exceptions.CoordinateNotFoundError: + raise ValueError("Invalid grid: projection_x/y coords required") + x_coord.convert_units("metres") + y_coord.convert_units("metres") + d_north_metres = y_coord.points[1] - y_coord.points[0] + d_east_metres = x_coord.points[1] - x_coord.points[0] + grid_cells_y = int(self.radius_in_km * 1000 / abs(d_north_metres)) + grid_cells_x = int(self.radius_in_km * 1000 / abs(d_east_metres)) + if grid_cells_x == 0 or grid_cells_y == 0: + raise ValueError( + ("Neighbourhood processing radius of " + + "{0} km ".format(self.radius_in_km) + + "gives zero cell extent") + ) + if (grid_cells_x > self.MAX_KERNEL_CELL_RADIUS or + grid_cells_y > self.MAX_KERNEL_CELL_RADIUS): + raise ValueError( + ("Neighbourhood processing radius of " + + "{0} km ".format(self.radius_in_km) + + "exceeds maximum grid cell extent") + ) + return grid_cells_x, grid_cells_y
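The radius-to-cells conversion above is simple integer arithmetic on the grid spacing; a sketch with a hypothetical 2 km spacing:

# A 6 km radius on a grid with (hypothetical) 2 km spacing covers
# 6000 / 2000 = 3 cells east and 3 cells north.
radius_in_km = 6.0
d_east_metres = d_north_metres = 2000.0
grid_cells_x = int(radius_in_km * 1000 / abs(d_east_metres))    # 3
grid_cells_y = int(radius_in_km * 1000 / abs(d_north_metres))   # 3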
+ +
[docs]    def process(self, cube):
+        """
+        Apply the neighbourhood processing kernel to the data in the
+        supplied cube, smoothing the field.
+
+        Returns
+        -------
+        Cube
+            The cube with the neighbourhood processing kernel applied to
+            its data.
+
+        """
+        try:
+            realiz_coord = cube.coord('realization')
+        except iris.exceptions.CoordinateNotFoundError:
+            pass
+        else:
+            if len(realiz_coord.points) > 1:
+                raise ValueError("Does not operate across realizations.")
+        if np.isnan(cube.data).any():
+            raise ValueError("Error: NaN detected in input cube data")
+        data = cube.data
+        ranges = self.get_grid_x_y_kernel_ranges(cube)
+        fullranges = np.zeros([np.ndim(data)])
+        axes = []
+        for coord_name in ['projection_x_coordinate',
+                           'projection_y_coordinate']:
+            axes.append(cube.coord_dims(coord_name)[0])
+        for axis_index, axis in enumerate(axes):
+            fullranges[axis] = ranges[axis_index]
+        kernel = np.ones([1 + x * 2 for x in fullranges])
+        n = np.ogrid[tuple([slice(-x, x+1) for x in ranges])]
+        if self.unweighted_mode:
+            mask = np.reshape(
+                np.sum([x ** 2 for x in n]) > np.cumprod(ranges)[-1],
+                np.shape(kernel)
+            )
+        else:
+            kernel[:] = (
+                (np.cumprod(ranges)[-1] - np.sum([x**2. for x in n])) /
+                np.cumprod(ranges)[-1]
+            )
+            mask = kernel < 0.
+        kernel[mask] = 0.
+        cube.data = scipy.ndimage.filters.correlate(
+            data, kernel, mode='nearest') / np.sum(kernel)
+        return cube
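The smoothing step itself is a normalised correlation with the kernel. A minimal sketch on made-up data, using a plain flat disc rather than the class's weighted kernel (the disc condition here is an illustrative simplification):

import numpy as np
import scipy.ndimage.filters

data = np.zeros((7, 7))
data[3, 3] = 1.0                  # a single "probability" spike
r = 2                             # kernel radius in grid cells
y, x = np.ogrid[-r:r + 1, -r:r + 1]
kernel = (x ** 2 + y ** 2 <= r ** 2).astype(float)   # flat disc, 13 cells
smoothed = scipy.ndimage.filters.correlate(
    data, kernel, mode='nearest') / np.sum(kernel)
print(smoothed[3, 3])             # 1/13: the spike is spread over the disc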
+
+ +
+
+
+ +
+
+ + + + \ No newline at end of file diff --git a/doc/build/html/_modules/improver/threshold.html b/doc/build/html/_modules/improver/threshold.html new file mode 100644 index 0000000000..8e13986e8d --- /dev/null +++ b/doc/build/html/_modules/improver/threshold.html @@ -0,0 +1,196 @@ + + + + + + + + improver.threshold — Improver documentation + + + + + + + + + + + + + + + + +
+
+
+
+ +

Source code for improver.threshold

+# -*- coding: utf-8 -*-
+# -----------------------------------------------------------------------------
+# (C) British Crown Copyright 2017 Met Office.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# * Redistributions of source code must retain the above copyright notice, this
+#   list of conditions and the following disclaimer.
+#
+# * Redistributions in binary form must reproduce the above copyright notice,
+#   this list of conditions and the following disclaimer in the documentation
+#   and/or other materials provided with the distribution.
+#
+# * Neither the name of the copyright holder nor the names of its
+#   contributors may be used to endorse or promote products derived from
+#   this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+"""Module containing thresholding classes."""
+
+
+import numpy as np
+
+
+
[docs]class BasicThreshold(object):
+
+    """Apply a threshold truth criterion to a cube.
+
+    Calculate the threshold truth value based on a linear membership function
+    around the threshold.
+
+    Can operate on multiple time sequences within a cube.
+
+    """
+
+    def __init__(self, threshold, fuzzy_factor,
+                 below_thresh_ok=False):
+        """Set up for processing an in-or-out of threshold binary field.
+
+        Parameters
+        ----------
+
+        threshold : float
+            The threshold point for 'significant' datapoints.
+
+        fuzzy_factor : float
+            Fraction of the threshold used to define the fuzzy membership
+            ramp, which runs linearly from threshold*fuzzy_factor up to
+            threshold*(2 - fuzzy_factor). Must be greater than 0 and less
+            than 1.
+
+        below_thresh_ok : boolean
+            True to count points as significant if *below* the threshold,
+            False to count points as significant if *above* the threshold.
+
+        """
+        if threshold == 0.0:
+            raise ValueError(
+                "Invalid threshold: zero not allowed")
+        self.threshold = threshold
+        if not 0 < fuzzy_factor < 1:
+            raise ValueError(
+                "Invalid fuzzy_factor: must be >0 and <1: {}".format(
+                    fuzzy_factor))
+        self.fuzzy_factor = fuzzy_factor
+        self.below_thresh_ok = below_thresh_ok
+
+    def __str__(self):
+        """Represent the configured plugin instance as a string."""
+        return (
+            '<BasicThreshold: threshold {}, fuzzy factor {}, ' +
+            'below_thresh_ok: {}>'
+        ).format(self.threshold, self.fuzzy_factor, self.below_thresh_ok)
+
[docs] def process(self, cube): + """Convert each point to a fuzzy truth value based on threshold. + + Parameters + ---------- + + cube : iris.cube.Cube + Cube to threshold. The code is dimension-agnostic. + + """ + lower_threshold = self.threshold * self.fuzzy_factor + if np.isnan(cube.data).any(): + raise ValueError("Error: NaN detected in input cube data") + truth_value = ( + (cube.data - lower_threshold) / + ((self.threshold * (2. - self.fuzzy_factor)) - lower_threshold) + ) + truth_value = np.clip(truth_value, 0., 1.) + if self.below_thresh_ok: + truth_value = 1. - truth_value + cube.data = truth_value + return cube
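A worked sketch of the fuzzy membership above with hypothetical values: threshold 1.0 and fuzzy_factor 0.5 give a linear ramp from 0.5 to 1.5, so the threshold itself maps to a truth value of 0.5:

import numpy as np

threshold, fuzzy_factor = 1.0, 0.5
lower = threshold * fuzzy_factor                 # ramp starts at 0.5
upper = threshold * (2.0 - fuzzy_factor)         # ramp ends at 1.5
data = np.array([0.25, 0.5, 1.0, 1.5, 2.0])
truth_value = np.clip((data - lower) / (upper - lower), 0.0, 1.0)
print(truth_value)                               # [0.  0.  0.5 1.  1. ]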
+
+ +
+
+
+ +
+
+ + + + \ No newline at end of file diff --git a/doc/build/html/_modules/index.html b/doc/build/html/_modules/index.html new file mode 100644 index 0000000000..219535f37d --- /dev/null +++ b/doc/build/html/_modules/index.html @@ -0,0 +1,92 @@ + + + + + + + + Overview: module code — Improver documentation + + + + + + + + + + + + + + + +
+ + +
+
+ + + + \ No newline at end of file diff --git a/doc/build/html/_sources/improver.ensemble_calibration.txt b/doc/build/html/_sources/improver.ensemble_calibration.txt new file mode 100644 index 0000000000..5ff3a3ee7d --- /dev/null +++ b/doc/build/html/_sources/improver.ensemble_calibration.txt @@ -0,0 +1,30 @@ +improver.ensemble_calibration package +===================================== + +Submodules +---------- + +improver.ensemble_calibration.ensemble_calibration module +--------------------------------------------------------- + +.. automodule:: improver.ensemble_calibration.ensemble_calibration + :members: + :undoc-members: + :show-inheritance: + +improver.ensemble_calibration.ensemble_calibration_utilities module +------------------------------------------------------------------- + +.. automodule:: improver.ensemble_calibration.ensemble_calibration_utilities + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: improver.ensemble_calibration + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/build/html/_sources/improver.grids.txt b/doc/build/html/_sources/improver.grids.txt new file mode 100644 index 0000000000..625dfd130e --- /dev/null +++ b/doc/build/html/_sources/improver.grids.txt @@ -0,0 +1,22 @@ +improver.grids package +====================== + +Submodules +---------- + +improver.grids.osgb module +-------------------------- + +.. automodule:: improver.grids.osgb + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: improver.grids + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/build/html/_sources/improver.txt b/doc/build/html/_sources/improver.txt new file mode 100644 index 0000000000..8a0472e33e --- /dev/null +++ b/doc/build/html/_sources/improver.txt @@ -0,0 +1,38 @@ +improver package +================ + +Subpackages +----------- + +.. toctree:: + + improver.ensemble_calibration + improver.grids + +Submodules +---------- + +improver.nbhood module +---------------------- + +.. automodule:: improver.nbhood + :members: + :undoc-members: + :show-inheritance: + +improver.threshold module +------------------------- + +.. automodule:: improver.threshold + :members: + :undoc-members: + :show-inheritance: + + +Module contents +--------------- + +.. automodule:: improver + :members: + :undoc-members: + :show-inheritance: diff --git a/doc/build/html/_sources/index.txt b/doc/build/html/_sources/index.txt new file mode 100644 index 0000000000..2aff0836f7 --- /dev/null +++ b/doc/build/html/_sources/index.txt @@ -0,0 +1,18 @@ +.. Improver documentation master file, created by + sphinx-quickstart on Fri May 19 13:27:21 2017. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +Welcome to Improver's documentation! +==================================== + +API documentation: :doc:`improver` + + +Indices and tables +================== + +* :ref:`genindex` +* :ref:`modindex` +* :ref:`search` + diff --git a/doc/build/html/_sources/modules.txt b/doc/build/html/_sources/modules.txt new file mode 100644 index 0000000000..8681ef9617 --- /dev/null +++ b/doc/build/html/_sources/modules.txt @@ -0,0 +1,7 @@ +lib +=== + +.. 
toctree:: + :maxdepth: 4 + + improver diff --git a/doc/build/html/_static/ajax-loader.gif b/doc/build/html/_static/ajax-loader.gif new file mode 100644 index 0000000000000000000000000000000000000000..61faf8cab23993bd3e1560bff0668bd628642330 GIT binary patch literal 673 zcmZ?wbhEHb6krfw_{6~Q|Nno%(3)e{?)x>&1u}A`t?OF7Z|1gRivOgXi&7IyQd1Pl zGfOfQ60;I3a`F>X^fL3(@);C=vM_KlFfb_o=k{|A33hf2a5d61U}gjg=>Rd%XaNQW zW@Cw{|b%Y*pl8F?4B9 zlo4Fz*0kZGJabY|>}Okf0}CCg{u4`zEPY^pV?j2@h+|igy0+Kz6p;@SpM4s6)XEMg z#3Y4GX>Hjlml5ftdH$4x0JGdn8~MX(U~_^d!Hi)=HU{V%g+mi8#UGbE-*ao8f#h+S z2a0-5+vc7MU$e-NhmBjLIC1v|)9+Im8x1yacJ7{^tLX(ZhYi^rpmXm0`@ku9b53aN zEXH@Y3JaztblgpxbJt{AtE1ad1Ca>{v$rwwvK(>{m~Gf_=-Ro7Fk{#;i~+{{>QtvI yb2P8Zac~?~=sRA>$6{!(^3;ZP0TPFR(G_-UDU(8Jl0?(IXu$~#4A!880|o%~Al1tN literal 0 HcmV?d00001 diff --git a/doc/build/html/_static/basic.css b/doc/build/html/_static/basic.css new file mode 100644 index 0000000000..0b79414a16 --- /dev/null +++ b/doc/build/html/_static/basic.css @@ -0,0 +1,611 @@ +/* + * basic.css + * ~~~~~~~~~ + * + * Sphinx stylesheet -- basic theme. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox input[type="text"] { + width: 170px; +} + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin: 10px 0 0 20px; + padding: 0; +} + +ul.search li { + padding: 5px 0 5px 20px; + background-image: url(file.png); + background-repeat: no-repeat; + background-position: 0 7px; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li div.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable dl, table.indextable dd { + margin-top: 0; + margin-bottom: 0; +} + +table.indextable 
tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.field-list ul { + padding-left: 1em; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px 7px 0 7px; + background-color: #ffe; + width: 40%; + float: right; +} + +p.sidebar-title { + font-weight: bold; +} + +/* -- topics ---------------------------------------------------------------- */ + +div.topic { + border: 1px solid #ccc; + padding: 7px 7px 0 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 7px; +} + +div.admonition dt { + font-weight: bold; +} + +div.admonition dl { + margin-bottom: 0; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + border: 0; + border-collapse: collapse; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +table.field-list td, table.field-list th { + border: 0 !important; +} + +table.footnote td, table.footnote th { + border: 0 !important; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure { 
+ margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text { +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + +ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +dl { + margin-bottom: 15px; +} + +dd p { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +dt:target, .highlighted { + background-color: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; +} + +td.linenos pre { + padding: 5px 0px; + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + margin-left: 0.5em; +} + +table.highlighttable td { + padding: 0 0.5em 0 0.5em; +} + +div.code-block-caption { + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +div.code-block-caption + div > div.highlight > pre { + margin-top: 0; +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + padding: 1em 1em 0; +} + +div.literal-block-wrapper div.highlight { + margin: 0; +} + +code.descname { + background-color: transparent; + font-weight: bold; + font-size: 1.2em; +} + +code.descclassname { + background-color: transparent; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + 
#top-link { + display: none; + } +} \ No newline at end of file diff --git a/doc/build/html/_static/classic.css b/doc/build/html/_static/classic.css new file mode 100644 index 0000000000..d98894b3f6 --- /dev/null +++ b/doc/build/html/_static/classic.css @@ -0,0 +1,261 @@ +/* + * default.css_t + * ~~~~~~~~~~~~~ + * + * Sphinx stylesheet -- default theme. + * + * :copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS. + * :license: BSD, see LICENSE for details. + * + */ + +@import url("basic.css"); + +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: sans-serif; + font-size: 100%; + background-color: #11303d; + color: #000; + margin: 0; + padding: 0; +} + +div.document { + background-color: #1c4e63; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 230px; +} + +div.body { + background-color: #ffffff; + color: #000000; + padding: 0 20px 30px 20px; +} + +div.footer { + color: #ffffff; + width: 100%; + padding: 9px 0 9px 0; + text-align: center; + font-size: 75%; +} + +div.footer a { + color: #ffffff; + text-decoration: underline; +} + +div.related { + background-color: #133f52; + line-height: 30px; + color: #ffffff; +} + +div.related a { + color: #ffffff; +} + +div.sphinxsidebar { +} + +div.sphinxsidebar h3 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.4em; + font-weight: normal; + margin: 0; + padding: 0; +} + +div.sphinxsidebar h3 a { + color: #ffffff; +} + +div.sphinxsidebar h4 { + font-family: 'Trebuchet MS', sans-serif; + color: #ffffff; + font-size: 1.3em; + font-weight: normal; + margin: 5px 0 0 0; + padding: 0; +} + +div.sphinxsidebar p { + color: #ffffff; +} + +div.sphinxsidebar p.topless { + margin: 5px 10px 10px 10px; +} + +div.sphinxsidebar ul { + margin: 10px; + padding: 0; + color: #ffffff; +} + +div.sphinxsidebar a { + color: #98dbcc; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + + + +/* -- hyperlink styles ------------------------------------------------------ */ + +a { + color: #355f7c; + text-decoration: none; +} + +a:visited { + color: #355f7c; + text-decoration: none; +} + +a:hover { + text-decoration: underline; +} + + + +/* -- body styles ----------------------------------------------------------- */ + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: 'Trebuchet MS', sans-serif; + background-color: #f2f2f2; + font-weight: normal; + color: #20435c; + border-bottom: 1px solid #ccc; + margin: 20px -20px 10px -20px; + padding: 3px 0 3px 10px; +} + +div.body h1 { margin-top: 0; font-size: 200%; } +div.body h2 { font-size: 160%; } +div.body h3 { font-size: 140%; } +div.body h4 { font-size: 120%; } +div.body h5 { font-size: 110%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #c60f0f; + font-size: 0.8em; + padding: 0 4px 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + background-color: #c60f0f; + color: white; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + text-align: justify; + line-height: 130%; +} + +div.admonition p.admonition-title + p { + display: inline; +} + +div.admonition p { + margin-bottom: 5px; +} + +div.admonition pre { + margin-bottom: 5px; +} + +div.admonition ul, div.admonition ol { + margin-bottom: 5px; +} + +div.note { + background-color: #eee; + border: 1px solid #ccc; +} + +div.seealso { + background-color: #ffc; + border: 1px solid #ff6; +} + +div.topic 
{
+    background-color: #eee;
+}
+
+div.warning {
+    background-color: #ffe4e4;
+    border: 1px solid #f66;
+}
+
+p.admonition-title {
+    display: inline;
+}
+
+p.admonition-title:after {
+    content: ":";
+}
+
+pre {
+    padding: 5px;
+    background-color: #eeffcc;
+    color: #333333;
+    line-height: 120%;
+    border: 1px solid #ac9;
+    border-left: none;
+    border-right: none;
+}
+
+code {
+    background-color: #ecf0f3;
+    padding: 0 1px 0 1px;
+    font-size: 0.95em;
+}
+
+th {
+    background-color: #ede;
+}
+
+.warning code {
+    background: #efc2c2;
+}
+
+.note code {
+    background: #d6d6d6;
+}
+
+.viewcode-back {
+    font-family: sans-serif;
+}
+
+div.viewcode-block:target {
+    background-color: #f4debf;
+    border-top: 1px solid #ac9;
+    border-bottom: 1px solid #ac9;
+}
+
+div.code-block-caption {
+    color: #efefef;
+    background-color: #1c4e63;
+}
\ No newline at end of file
diff --git a/doc/build/html/_static/comment-bright.png b/doc/build/html/_static/comment-bright.png
new file mode 100644
index 0000000000000000000000000000000000000000..551517b8c83b76f734ff791f847829a760ad1903
GIT binary patch
literal 3500
[binary image data omitted]

diff --git a/doc/build/html/_static/comment.png b/doc/build/html/_static/comment.png
new file mode 100644
index 0000000000000000000000000000000000000000..92feb52b8824c6b0f59b658b1196c61de9162a95
GIT binary patch
literal 3445
[binary image data omitted]
diff --git a/doc/build/html/_static/doctools.js b/doc/build/html/_static/doctools.js
new file mode 100644
--- /dev/null
+++ b/doc/build/html/_static/doctools.js
+jQuery.fn.highlightText = function(text, className) {
+  function highlight(node) {
+    if (node.nodeType == 3) {
+      var val = node.nodeValue;
+      var pos = val.toLowerCase().indexOf(text);
+      if (pos >= 0 && !jQuery(node.parentNode).hasClass(className)) {
+        var span = document.createElement("span");
+        span.className = className;
+        span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+        node.parentNode.insertBefore(span, node.parentNode.insertBefore(
+          document.createTextNode(val.substr(pos + text.length)),
+          node.nextSibling));
+        node.nodeValue = val.substr(0, pos);
+      }
+    }
+    else if (!jQuery(node).is("button, select, textarea")) {
+      jQuery.each(node.childNodes, function() {
+        highlight(this);
+      });
+    }
+  }
+  return this.each(function() {
+    highlight(this);
+  });
+};
+
+/*
+ * backward compatibility for jQuery.browser
+ * This will be supported until firefox bug is fixed.
+ */
+if (!jQuery.browser) {
+  jQuery.uaMatch = function(ua) {
+    ua = ua.toLowerCase();
+
+    var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
+      /(webkit)[ \/]([\w.]+)/.exec(ua) ||
+      /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
+      /(msie) ([\w.]+)/.exec(ua) ||
+      ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
+      [];
+
+    return {
+      browser: match[ 1 ] || "",
+      version: match[ 2 ] || "0"
+    };
+  };
+  jQuery.browser = {};
+  jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
+}
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+var Documentation = {
+
+  init : function() {
+    this.fixFirefoxAnchorBug();
+    this.highlightSearchWords();
+    this.initIndexTable();
+  },
+
+  /**
+   * i18n support
+   */
+  TRANSLATIONS : {},
+  PLURAL_EXPR : function(n) { return n == 1 ? 0 : 1; },
+  LOCALE : 'unknown',
+
+  // gettext and ngettext don't access this so that the functions
+  // can safely be bound to a different name (_ = Documentation.gettext)
+  gettext : function(string) {
+    var translated = Documentation.TRANSLATIONS[string];
+    if (typeof translated == 'undefined')
+      return string;
+    return (typeof translated == 'string') ? translated : translated[0];
+  },
+
+  ngettext : function(singular, plural, n) {
+    var translated = Documentation.TRANSLATIONS[singular];
+    if (typeof translated == 'undefined')
+      return (n == 1) ? singular : plural;
+    return translated[Documentation.PLURAL_EXPR(n)];
+  },
+
+  addTranslations : function(catalog) {
+    for (var key in catalog.messages)
+      this.TRANSLATIONS[key] = catalog.messages[key];
+    this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
+    this.LOCALE = catalog.locale;
+  },
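+
+  // Editor's illustration (not part of the upstream file): how a loaded
+  // message catalog drives the two lookup helpers above. The catalog values
+  // shown here are hypothetical example data.
+  //
+  //   Documentation.addTranslations({
+  //     messages: {
+  //       "Hide Search Matches": "Suchtreffer ausblenden",
+  //       "%s match": ["%s Treffer", "%s Treffer"]
+  //     },
+  //     plural_expr: "n != 1",
+  //     locale: "de"
+  //   });
+  //   Documentation.gettext("Hide Search Matches");        // "Suchtreffer ausblenden"
+  //   Documentation.ngettext("%s match", "%s matches", 2); // "%s Treffer", index +(2 != 1) === 1
+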
+  /**
+   * add context elements like header anchor links
+   */
+  addContextElements : function() {
+    $('div[id] > :header:first').each(function() {
+      $('<a class="headerlink">\u00B6</a>').
+      attr('href', '#' + this.id).
+      attr('title', _('Permalink to this headline')).
+      appendTo(this);
+    });
+    $('dt[id]').each(function() {
+      $('<a class="headerlink">\u00B6</a>').
+      attr('href', '#' + this.id).
+      attr('title', _('Permalink to this definition')).
+      appendTo(this);
+    });
+  },
+
+  /**
+   * workaround a firefox stupidity
+   * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
+   */
+  fixFirefoxAnchorBug : function() {
+    if (document.location.hash)
+      window.setTimeout(function() {
+        document.location.href += '';
+      }, 10);
+  },
+
+  /**
+   * highlight the search words provided in the url in the text
+   */
+  highlightSearchWords : function() {
+    var params = $.getQueryParameters();
+    var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
+    if (terms.length) {
+      var body = $('div.body');
+      if (!body.length) {
+        body = $('body');
+      }
+      window.setTimeout(function() {
+        $.each(terms, function() {
+          body.highlightText(this.toLowerCase(), 'highlighted');
+        });
+      }, 10);
+      $('<p class="highlight-link"><a href="javascript:Documentation.' +
+        'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
+        .appendTo($('#searchbox'));
+    }
+  },
+
+  /**
+   * init the domain index toggle buttons
+   */
+  initIndexTable : function() {
+    var togglers = $('img.toggler').click(function() {
+      var src = $(this).attr('src');
+      var idnum = $(this).attr('id').substr(7);
+      $('tr.cg-' + idnum).toggle();
+      if (src.substr(-9) == 'minus.png')
+        $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
+      else
+        $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
+    }).css('display', '');
+    if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
+      togglers.click();
+    }
+  },
+
+  /**
+   * helper function to hide the search marks again
+   */
+  hideSearchWords : function() {
+    $('#searchbox .highlight-link').fadeOut(300);
+    $('span.highlighted').removeClass('highlighted');
+  },
+
+  /**
+   * make the url absolute
+   */
+  makeURL : function(relativeURL) {
+    return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
+  },
+
+  /**
+   * get the current relative url
+   */
+  getCurrentURL : function() {
+    var path = document.location.pathname;
+    var parts = path.split(/\//);
+    $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
+      if (this == '..')
+        parts.pop();
+    });
+    var url = parts.join('/');
+    return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
+  },
+
+  initOnKeyListeners: function() {
+    $(document).keyup(function(event) {
+      var activeElementType = document.activeElement.tagName;
+      // don't navigate when in search box or textarea
+      if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT') {
+        switch (event.keyCode) {
+          case 37: // left
+            var prevHref = $('link[rel="prev"]').prop('href');
+            if (prevHref) {
+              window.location.href = prevHref;
+              return false;
+            }
+          case 39: // right
+            var nextHref = $('link[rel="next"]').prop('href');
+            if (nextHref) {
+              window.location.href = nextHref;
+              return false;
+            }
+        }
+      }
+    });
+  }
+};
+
+// quick alias for translations
+_ = Documentation.gettext;
+
+$(document).ready(function() {
+  Documentation.init();
+});
\ No newline at end of file
diff --git a/doc/build/html/_static/down-pressed.png b/doc/build/html/_static/down-pressed.png
new file mode 100644
index 0000000000000000000000000000000000000000..7c30d004b71b32bb2fc06b3bd4dc8278baab0946
GIT binary patch
literal 347
[binary image data omitted]
diff --git a/doc/build/html/_static/file.png b/doc/build/html/_static/file.png
new file mode 100644
index 0000000000000000000000000000000000000000..254c60bfbe2715ae2edca48ebccfd074deb8031d
GIT binary patch
literal 358
zcmeAS@N?(olHy`uVBq!ia0vp^0wB!63?wyl`GbKJXMsm#F#`j)FbFd;%$g$s6l5>)
z^mS#w%FV~i&ZxO9L3Zxqw8>dd4I&zcKG){Yx14xKr0
ZQJ$m%mv17-NAAj}g)$7-<-@JMA z_U+TRK=AR}yLa#2zkmPX!-tO_KYsf3>Hq)#%qnY_1Fd8&3GxeO2wSmci|LJf=|BO- zByV>Yl`U*PX977no-U3d5|XS39sLdkFt8q|+|QqL_#ErUf6I%zFA7b%b>3$hFGGFs zc72AL|61pRJ1(+5wNdg|xP#*`gQ~lOnTFKiIjl#S3)+QV=h{~`9{M=hx#5uZ&-tIF sG!8onYS_8EFr8v&@CavkqYey&g)1epR*Fkm0PSV)boFyt=akR{044O6bN~PV literal 0 HcmV?d00001 diff --git a/doc/build/html/_static/jquery-1.11.1.js b/doc/build/html/_static/jquery-1.11.1.js new file mode 100644 index 0000000000..d4b67f7e6c --- /dev/null +++ b/doc/build/html/_static/jquery-1.11.1.js @@ -0,0 +1,10308 @@ +/*! + * jQuery JavaScript Library v1.11.1 + * http://jquery.com/ + * + * Includes Sizzle.js + * http://sizzlejs.com/ + * + * Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-05-01T17:42Z + */ + +(function( global, factory ) { + + if ( typeof module === "object" && typeof module.exports === "object" ) { + // For CommonJS and CommonJS-like environments where a proper window is present, + // execute the factory and get jQuery + // For environments that do not inherently posses a window with a document + // (such as Node.js), expose a jQuery-making factory as module.exports + // This accentuates the need for the creation of a real window + // e.g. var jQuery = require("jquery")(window); + // See ticket #14549 for more info + module.exports = global.document ? + factory( global, true ) : + function( w ) { + if ( !w.document ) { + throw new Error( "jQuery requires a window with a document" ); + } + return factory( w ); + }; + } else { + factory( global ); + } + +// Pass this if window is not defined yet +}(typeof window !== "undefined" ? window : this, function( window, noGlobal ) { + +// Can't do this because several apps including ASP.NET trace +// the stack via arguments.caller.callee and Firefox dies if +// you try to trace through "use strict" call chains. (#13335) +// Support: Firefox 18+ +// + +var deletedIds = []; + +var slice = deletedIds.slice; + +var concat = deletedIds.concat; + +var push = deletedIds.push; + +var indexOf = deletedIds.indexOf; + +var class2type = {}; + +var toString = class2type.toString; + +var hasOwn = class2type.hasOwnProperty; + +var support = {}; + + + +var + version = "1.11.1", + + // Define a local copy of jQuery + jQuery = function( selector, context ) { + // The jQuery object is actually just the init constructor 'enhanced' + // Need init if jQuery is called (just allow error to be thrown if not included) + return new jQuery.fn.init( selector, context ); + }, + + // Support: Android<4.1, IE<9 + // Make sure we trim BOM and NBSP + rtrim = /^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g, + + // Matches dashed string for camelizing + rmsPrefix = /^-ms-/, + rdashAlpha = /-([\da-z])/gi, + + // Used by jQuery.camelCase as callback to replace() + fcamelCase = function( all, letter ) { + return letter.toUpperCase(); + }; + +jQuery.fn = jQuery.prototype = { + // The current version of jQuery being used + jquery: version, + + constructor: jQuery, + + // Start with an empty selector + selector: "", + + // The default length of a jQuery object is 0 + length: 0, + + toArray: function() { + return slice.call( this ); + }, + + // Get the Nth element in the matched element set OR + // Get the whole matched element set as a clean array + get: function( num ) { + return num != null ? + + // Return just the one element from the set + ( num < 0 ? 
this[ num + this.length ] : this[ num ] ) : + + // Return all the elements in a clean array + slice.call( this ); + }, + + // Take an array of elements and push it onto the stack + // (returning the new matched element set) + pushStack: function( elems ) { + + // Build a new jQuery matched element set + var ret = jQuery.merge( this.constructor(), elems ); + + // Add the old object onto the stack (as a reference) + ret.prevObject = this; + ret.context = this.context; + + // Return the newly-formed element set + return ret; + }, + + // Execute a callback for every element in the matched set. + // (You can seed the arguments with an array of args, but this is + // only used internally.) + each: function( callback, args ) { + return jQuery.each( this, callback, args ); + }, + + map: function( callback ) { + return this.pushStack( jQuery.map(this, function( elem, i ) { + return callback.call( elem, i, elem ); + })); + }, + + slice: function() { + return this.pushStack( slice.apply( this, arguments ) ); + }, + + first: function() { + return this.eq( 0 ); + }, + + last: function() { + return this.eq( -1 ); + }, + + eq: function( i ) { + var len = this.length, + j = +i + ( i < 0 ? len : 0 ); + return this.pushStack( j >= 0 && j < len ? [ this[j] ] : [] ); + }, + + end: function() { + return this.prevObject || this.constructor(null); + }, + + // For internal use only. + // Behaves like an Array's method, not like a jQuery method. + push: push, + sort: deletedIds.sort, + splice: deletedIds.splice +}; + +jQuery.extend = jQuery.fn.extend = function() { + var src, copyIsArray, copy, name, options, clone, + target = arguments[0] || {}, + i = 1, + length = arguments.length, + deep = false; + + // Handle a deep copy situation + if ( typeof target === "boolean" ) { + deep = target; + + // skip the boolean and the target + target = arguments[ i ] || {}; + i++; + } + + // Handle case when target is a string or something (possible in deep copy) + if ( typeof target !== "object" && !jQuery.isFunction(target) ) { + target = {}; + } + + // extend jQuery itself if only one argument is passed + if ( i === length ) { + target = this; + i--; + } + + for ( ; i < length; i++ ) { + // Only deal with non-null/undefined values + if ( (options = arguments[ i ]) != null ) { + // Extend the base object + for ( name in options ) { + src = target[ name ]; + copy = options[ name ]; + + // Prevent never-ending loop + if ( target === copy ) { + continue; + } + + // Recurse if we're merging plain objects or arrays + if ( deep && copy && ( jQuery.isPlainObject(copy) || (copyIsArray = jQuery.isArray(copy)) ) ) { + if ( copyIsArray ) { + copyIsArray = false; + clone = src && jQuery.isArray(src) ? src : []; + + } else { + clone = src && jQuery.isPlainObject(src) ? src : {}; + } + + // Never move original objects, clone them + target[ name ] = jQuery.extend( deep, clone, copy ); + + // Don't bring in undefined values + } else if ( copy !== undefined ) { + target[ name ] = copy; + } + } + } + } + + // Return the modified object + return target; +}; + +jQuery.extend({ + // Unique for each copy of jQuery on the page + expando: "jQuery" + ( version + Math.random() ).replace( /\D/g, "" ), + + // Assume jQuery is ready without the ready module + isReady: true, + + error: function( msg ) { + throw new Error( msg ); + }, + + noop: function() {}, + + // See test/unit/core.js for details concerning isFunction. + // Since version 1.3, DOM methods and functions like alert + // aren't supported. They return false on IE (#2968). 
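+ // Editor's illustration, not upstream jQuery: expected results of the
+ // type-check helpers defined below, e.g.
+ //   jQuery.isFunction(function() {})  // true
+ //   jQuery.isArray([1, 2, 3])         // true
+ //   jQuery.isWindow(window)           // true
+ //   jQuery.isNumeric("0x10")          // true: "0x10" - parseFloat("0x10") is 16, >= 0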
+ isFunction: function( obj ) { + return jQuery.type(obj) === "function"; + }, + + isArray: Array.isArray || function( obj ) { + return jQuery.type(obj) === "array"; + }, + + isWindow: function( obj ) { + /* jshint eqeqeq: false */ + return obj != null && obj == obj.window; + }, + + isNumeric: function( obj ) { + // parseFloat NaNs numeric-cast false positives (null|true|false|"") + // ...but misinterprets leading-number strings, particularly hex literals ("0x...") + // subtraction forces infinities to NaN + return !jQuery.isArray( obj ) && obj - parseFloat( obj ) >= 0; + }, + + isEmptyObject: function( obj ) { + var name; + for ( name in obj ) { + return false; + } + return true; + }, + + isPlainObject: function( obj ) { + var key; + + // Must be an Object. + // Because of IE, we also have to check the presence of the constructor property. + // Make sure that DOM nodes and window objects don't pass through, as well + if ( !obj || jQuery.type(obj) !== "object" || obj.nodeType || jQuery.isWindow( obj ) ) { + return false; + } + + try { + // Not own constructor property must be Object + if ( obj.constructor && + !hasOwn.call(obj, "constructor") && + !hasOwn.call(obj.constructor.prototype, "isPrototypeOf") ) { + return false; + } + } catch ( e ) { + // IE8,9 Will throw exceptions on certain host objects #9897 + return false; + } + + // Support: IE<9 + // Handle iteration over inherited properties before own properties. + if ( support.ownLast ) { + for ( key in obj ) { + return hasOwn.call( obj, key ); + } + } + + // Own properties are enumerated firstly, so to speed up, + // if last one is own, then all properties are own. + for ( key in obj ) {} + + return key === undefined || hasOwn.call( obj, key ); + }, + + type: function( obj ) { + if ( obj == null ) { + return obj + ""; + } + return typeof obj === "object" || typeof obj === "function" ? 
+ class2type[ toString.call(obj) ] || "object" : + typeof obj; + }, + + // Evaluates a script in a global context + // Workarounds based on findings by Jim Driscoll + // http://weblogs.java.net/blog/driscoll/archive/2009/09/08/eval-javascript-global-context + globalEval: function( data ) { + if ( data && jQuery.trim( data ) ) { + // We use execScript on Internet Explorer + // We use an anonymous function so that context is window + // rather than jQuery in Firefox + ( window.execScript || function( data ) { + window[ "eval" ].call( window, data ); + } )( data ); + } + }, + + // Convert dashed to camelCase; used by the css and data modules + // Microsoft forgot to hump their vendor prefix (#9572) + camelCase: function( string ) { + return string.replace( rmsPrefix, "ms-" ).replace( rdashAlpha, fcamelCase ); + }, + + nodeName: function( elem, name ) { + return elem.nodeName && elem.nodeName.toLowerCase() === name.toLowerCase(); + }, + + // args is for internal usage only + each: function( obj, callback, args ) { + var value, + i = 0, + length = obj.length, + isArray = isArraylike( obj ); + + if ( args ) { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.apply( obj[ i ], args ); + + if ( value === false ) { + break; + } + } + } + + // A special, fast, case for the most common use of each + } else { + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } else { + for ( i in obj ) { + value = callback.call( obj[ i ], i, obj[ i ] ); + + if ( value === false ) { + break; + } + } + } + } + + return obj; + }, + + // Support: Android<4.1, IE<9 + trim: function( text ) { + return text == null ? + "" : + ( text + "" ).replace( rtrim, "" ); + }, + + // results is for internal usage only + makeArray: function( arr, results ) { + var ret = results || []; + + if ( arr != null ) { + if ( isArraylike( Object(arr) ) ) { + jQuery.merge( ret, + typeof arr === "string" ? + [ arr ] : arr + ); + } else { + push.call( ret, arr ); + } + } + + return ret; + }, + + inArray: function( elem, arr, i ) { + var len; + + if ( arr ) { + if ( indexOf ) { + return indexOf.call( arr, elem, i ); + } + + len = arr.length; + i = i ? i < 0 ? 
Math.max( 0, len + i ) : i : 0; + + for ( ; i < len; i++ ) { + // Skip accessing in sparse arrays + if ( i in arr && arr[ i ] === elem ) { + return i; + } + } + } + + return -1; + }, + + merge: function( first, second ) { + var len = +second.length, + j = 0, + i = first.length; + + while ( j < len ) { + first[ i++ ] = second[ j++ ]; + } + + // Support: IE<9 + // Workaround casting of .length to NaN on otherwise arraylike objects (e.g., NodeLists) + if ( len !== len ) { + while ( second[j] !== undefined ) { + first[ i++ ] = second[ j++ ]; + } + } + + first.length = i; + + return first; + }, + + grep: function( elems, callback, invert ) { + var callbackInverse, + matches = [], + i = 0, + length = elems.length, + callbackExpect = !invert; + + // Go through the array, only saving the items + // that pass the validator function + for ( ; i < length; i++ ) { + callbackInverse = !callback( elems[ i ], i ); + if ( callbackInverse !== callbackExpect ) { + matches.push( elems[ i ] ); + } + } + + return matches; + }, + + // arg is for internal usage only + map: function( elems, callback, arg ) { + var value, + i = 0, + length = elems.length, + isArray = isArraylike( elems ), + ret = []; + + // Go through the array, translating each of the items to their new values + if ( isArray ) { + for ( ; i < length; i++ ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + + // Go through every key on the object, + } else { + for ( i in elems ) { + value = callback( elems[ i ], i, arg ); + + if ( value != null ) { + ret.push( value ); + } + } + } + + // Flatten any nested arrays + return concat.apply( [], ret ); + }, + + // A global GUID counter for objects + guid: 1, + + // Bind a function to a context, optionally partially applying any + // arguments. + proxy: function( fn, context ) { + var args, proxy, tmp; + + if ( typeof context === "string" ) { + tmp = fn[ context ]; + context = fn; + fn = tmp; + } + + // Quick check to determine if target is callable, in the spec + // this throws a TypeError, but we will just return undefined. + if ( !jQuery.isFunction( fn ) ) { + return undefined; + } + + // Simulated bind + args = slice.call( arguments, 2 ); + proxy = function() { + return fn.apply( context || this, args.concat( slice.call( arguments ) ) ); + }; + + // Set the guid of unique handler to the same of original handler, so it can be removed + proxy.guid = fn.guid = fn.guid || jQuery.guid++; + + return proxy; + }, + + now: function() { + return +( new Date() ); + }, + + // jQuery.support is not used in Core but other projects attach their + // properties to it so it needs to exist. + support: support +}); + +// Populate the class2type map +jQuery.each("Boolean Number String Function Array Date RegExp Object Error".split(" "), function(i, name) { + class2type[ "[object " + name + "]" ] = name.toLowerCase(); +}); + +function isArraylike( obj ) { + var length = obj.length, + type = jQuery.type( obj ); + + if ( type === "function" || jQuery.isWindow( obj ) ) { + return false; + } + + if ( obj.nodeType === 1 && length ) { + return true; + } + + return type === "array" || length === 0 || + typeof length === "number" && length > 0 && ( length - 1 ) in obj; +} +var Sizzle = +/*! + * Sizzle CSS Selector Engine v1.10.19 + * http://sizzlejs.com/ + * + * Copyright 2013 jQuery Foundation, Inc. 
and other contributors + * Released under the MIT license + * http://jquery.org/license + * + * Date: 2014-04-18 + */ +(function( window ) { + +var i, + support, + Expr, + getText, + isXML, + tokenize, + compile, + select, + outermostContext, + sortInput, + hasDuplicate, + + // Local document vars + setDocument, + document, + docElem, + documentIsHTML, + rbuggyQSA, + rbuggyMatches, + matches, + contains, + + // Instance-specific data + expando = "sizzle" + -(new Date()), + preferredDoc = window.document, + dirruns = 0, + done = 0, + classCache = createCache(), + tokenCache = createCache(), + compilerCache = createCache(), + sortOrder = function( a, b ) { + if ( a === b ) { + hasDuplicate = true; + } + return 0; + }, + + // General-purpose constants + strundefined = typeof undefined, + MAX_NEGATIVE = 1 << 31, + + // Instance methods + hasOwn = ({}).hasOwnProperty, + arr = [], + pop = arr.pop, + push_native = arr.push, + push = arr.push, + slice = arr.slice, + // Use a stripped-down indexOf if we can't use a native one + indexOf = arr.indexOf || function( elem ) { + var i = 0, + len = this.length; + for ( ; i < len; i++ ) { + if ( this[i] === elem ) { + return i; + } + } + return -1; + }, + + booleans = "checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped", + + // Regular expressions + + // Whitespace characters http://www.w3.org/TR/css3-selectors/#whitespace + whitespace = "[\\x20\\t\\r\\n\\f]", + // http://www.w3.org/TR/css3-syntax/#characters + characterEncoding = "(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+", + + // Loosely modeled on CSS identifier characters + // An unquoted value should be a CSS identifier http://www.w3.org/TR/css3-selectors/#attribute-selectors + // Proper syntax: http://www.w3.org/TR/CSS21/syndata.html#value-def-identifier + identifier = characterEncoding.replace( "w", "w#" ), + + // Attribute selectors: http://www.w3.org/TR/selectors/#attribute-selectors + attributes = "\\[" + whitespace + "*(" + characterEncoding + ")(?:" + whitespace + + // Operator (capture 2) + "*([*^$|!~]?=)" + whitespace + + // "Attribute values must be CSS identifiers [capture 5] or strings [capture 3 or capture 4]" + "*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|(" + identifier + "))|)" + whitespace + + "*\\]", + + pseudos = ":(" + characterEncoding + ")(?:\\((" + + // To reduce the number of selectors needing tokenize in the preFilter, prefer arguments: + // 1. quoted (capture 3; capture 4 or capture 5) + "('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|" + + // 2. simple (capture 6) + "((?:\\\\.|[^\\\\()[\\]]|" + attributes + ")*)|" + + // 3. 
anything else (capture 2) + ".*" + + ")\\)|)", + + // Leading and non-escaped trailing whitespace, capturing some non-whitespace characters preceding the latter + rtrim = new RegExp( "^" + whitespace + "+|((?:^|[^\\\\])(?:\\\\.)*)" + whitespace + "+$", "g" ), + + rcomma = new RegExp( "^" + whitespace + "*," + whitespace + "*" ), + rcombinators = new RegExp( "^" + whitespace + "*([>+~]|" + whitespace + ")" + whitespace + "*" ), + + rattributeQuotes = new RegExp( "=" + whitespace + "*([^\\]'\"]*?)" + whitespace + "*\\]", "g" ), + + rpseudo = new RegExp( pseudos ), + ridentifier = new RegExp( "^" + identifier + "$" ), + + matchExpr = { + "ID": new RegExp( "^#(" + characterEncoding + ")" ), + "CLASS": new RegExp( "^\\.(" + characterEncoding + ")" ), + "TAG": new RegExp( "^(" + characterEncoding.replace( "w", "w*" ) + ")" ), + "ATTR": new RegExp( "^" + attributes ), + "PSEUDO": new RegExp( "^" + pseudos ), + "CHILD": new RegExp( "^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\(" + whitespace + + "*(even|odd|(([+-]|)(\\d*)n|)" + whitespace + "*(?:([+-]|)" + whitespace + + "*(\\d+)|))" + whitespace + "*\\)|)", "i" ), + "bool": new RegExp( "^(?:" + booleans + ")$", "i" ), + // For use in libraries implementing .is() + // We use this for POS matching in `select` + "needsContext": new RegExp( "^" + whitespace + "*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\(" + + whitespace + "*((?:-\\d)?\\d*)" + whitespace + "*\\)|)(?=[^-]|$)", "i" ) + }, + + rinputs = /^(?:input|select|textarea|button)$/i, + rheader = /^h\d$/i, + + rnative = /^[^{]+\{\s*\[native \w/, + + // Easily-parseable/retrievable ID or TAG or CLASS selectors + rquickExpr = /^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/, + + rsibling = /[+~]/, + rescape = /'|\\/g, + + // CSS escapes http://www.w3.org/TR/CSS21/syndata.html#escaped-characters + runescape = new RegExp( "\\\\([\\da-f]{1,6}" + whitespace + "?|(" + whitespace + ")|.)", "ig" ), + funescape = function( _, escaped, escapedWhitespace ) { + var high = "0x" + escaped - 0x10000; + // NaN means non-codepoint + // Support: Firefox<24 + // Workaround erroneous numeric interpretation of +"0x" + return high !== high || escapedWhitespace ? + escaped : + high < 0 ? + // BMP codepoint + String.fromCharCode( high + 0x10000 ) : + // Supplemental Plane codepoint (surrogate pair) + String.fromCharCode( high >> 10 | 0xD800, high & 0x3FF | 0xDC00 ); + }; + +// Optimize for push.apply( _, NodeList ) +try { + push.apply( + (arr = slice.call( preferredDoc.childNodes )), + preferredDoc.childNodes + ); + // Support: Android<4.0 + // Detect silently failing push.apply + arr[ preferredDoc.childNodes.length ].nodeType; +} catch ( e ) { + push = { apply: arr.length ? + + // Leverage slice if possible + function( target, els ) { + push_native.apply( target, slice.call(els) ); + } : + + // Support: IE<9 + // Otherwise append directly + function( target, els ) { + var j = target.length, + i = 0; + // Can't trust NodeList.length + while ( (target[j++] = els[i++]) ) {} + target.length = j - 1; + } + }; +} + +function Sizzle( selector, context, results, seed ) { + var match, elem, m, nodeType, + // QSA vars + i, groups, old, nid, newContext, newSelector; + + if ( ( context ? 
context.ownerDocument || context : preferredDoc ) !== document ) { + setDocument( context ); + } + + context = context || document; + results = results || []; + + if ( !selector || typeof selector !== "string" ) { + return results; + } + + if ( (nodeType = context.nodeType) !== 1 && nodeType !== 9 ) { + return []; + } + + if ( documentIsHTML && !seed ) { + + // Shortcuts + if ( (match = rquickExpr.exec( selector )) ) { + // Speed-up: Sizzle("#ID") + if ( (m = match[1]) ) { + if ( nodeType === 9 ) { + elem = context.getElementById( m ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document (jQuery #6963) + if ( elem && elem.parentNode ) { + // Handle the case where IE, Opera, and Webkit return items + // by name instead of ID + if ( elem.id === m ) { + results.push( elem ); + return results; + } + } else { + return results; + } + } else { + // Context is not a document + if ( context.ownerDocument && (elem = context.ownerDocument.getElementById( m )) && + contains( context, elem ) && elem.id === m ) { + results.push( elem ); + return results; + } + } + + // Speed-up: Sizzle("TAG") + } else if ( match[2] ) { + push.apply( results, context.getElementsByTagName( selector ) ); + return results; + + // Speed-up: Sizzle(".CLASS") + } else if ( (m = match[3]) && support.getElementsByClassName && context.getElementsByClassName ) { + push.apply( results, context.getElementsByClassName( m ) ); + return results; + } + } + + // QSA path + if ( support.qsa && (!rbuggyQSA || !rbuggyQSA.test( selector )) ) { + nid = old = expando; + newContext = context; + newSelector = nodeType === 9 && selector; + + // qSA works strangely on Element-rooted queries + // We can work around this by specifying an extra ID on the root + // and working up from there (Thanks to Andrew Dupont for the technique) + // IE 8 doesn't work on object elements + if ( nodeType === 1 && context.nodeName.toLowerCase() !== "object" ) { + groups = tokenize( selector ); + + if ( (old = context.getAttribute("id")) ) { + nid = old.replace( rescape, "\\$&" ); + } else { + context.setAttribute( "id", nid ); + } + nid = "[id='" + nid + "'] "; + + i = groups.length; + while ( i-- ) { + groups[i] = nid + toSelector( groups[i] ); + } + newContext = rsibling.test( selector ) && testContext( context.parentNode ) || context; + newSelector = groups.join(","); + } + + if ( newSelector ) { + try { + push.apply( results, + newContext.querySelectorAll( newSelector ) + ); + return results; + } catch(qsaError) { + } finally { + if ( !old ) { + context.removeAttribute("id"); + } + } + } + } + } + + // All others + return select( selector.replace( rtrim, "$1" ), context, results, seed ); +} + +/** + * Create key-value caches of limited size + * @returns {Function(string, Object)} Returns the Object data after storing it on itself with + * property name the (space-suffixed) string and (if the cache is larger than Expr.cacheLength) + * deleting the oldest entry + */ +function createCache() { + var keys = []; + + function cache( key, value ) { + // Use (key + " ") to avoid collision with native prototype properties (see Issue #157) + if ( keys.push( key + " " ) > Expr.cacheLength ) { + // Only keep the most recent entries + delete cache[ keys.shift() ]; + } + return (cache[ key + " " ] = value); + } + return cache; +} + +/** + * Mark a function for special use by Sizzle + * @param {Function} fn The function to mark + */ +function markFunction( fn ) { + fn[ expando ] = true; + return fn; +} + +/** + * Support 
testing using an element + * @param {Function} fn Passed the created div and expects a boolean result + */ +function assert( fn ) { + var div = document.createElement("div"); + + try { + return !!fn( div ); + } catch (e) { + return false; + } finally { + // Remove from its parent by default + if ( div.parentNode ) { + div.parentNode.removeChild( div ); + } + // release memory in IE + div = null; + } +} + +/** + * Adds the same handler for all of the specified attrs + * @param {String} attrs Pipe-separated list of attributes + * @param {Function} handler The method that will be applied + */ +function addHandle( attrs, handler ) { + var arr = attrs.split("|"), + i = attrs.length; + + while ( i-- ) { + Expr.attrHandle[ arr[i] ] = handler; + } +} + +/** + * Checks document order of two siblings + * @param {Element} a + * @param {Element} b + * @returns {Number} Returns less than 0 if a precedes b, greater than 0 if a follows b + */ +function siblingCheck( a, b ) { + var cur = b && a, + diff = cur && a.nodeType === 1 && b.nodeType === 1 && + ( ~b.sourceIndex || MAX_NEGATIVE ) - + ( ~a.sourceIndex || MAX_NEGATIVE ); + + // Use IE sourceIndex if available on both nodes + if ( diff ) { + return diff; + } + + // Check if b follows a + if ( cur ) { + while ( (cur = cur.nextSibling) ) { + if ( cur === b ) { + return -1; + } + } + } + + return a ? 1 : -1; +} + +/** + * Returns a function to use in pseudos for input types + * @param {String} type + */ +function createInputPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for buttons + * @param {String} type + */ +function createButtonPseudo( type ) { + return function( elem ) { + var name = elem.nodeName.toLowerCase(); + return (name === "input" || name === "button") && elem.type === type; + }; +} + +/** + * Returns a function to use in pseudos for positionals + * @param {Function} fn + */ +function createPositionalPseudo( fn ) { + return markFunction(function( argument ) { + argument = +argument; + return markFunction(function( seed, matches ) { + var j, + matchIndexes = fn( [], seed.length, argument ), + i = matchIndexes.length; + + // Match elements found at the specified indexes + while ( i-- ) { + if ( seed[ (j = matchIndexes[i]) ] ) { + seed[j] = !(matches[j] = seed[j]); + } + } + }); + }); +} + +/** + * Checks a node for validity as a Sizzle context + * @param {Element|Object=} context + * @returns {Element|Object|Boolean} The input node if acceptable, otherwise a falsy value + */ +function testContext( context ) { + return context && typeof context.getElementsByTagName !== strundefined && context; +} + +// Expose support vars for convenience +support = Sizzle.support = {}; + +/** + * Detects XML nodes + * @param {Element|Object} elem An element or a document + * @returns {Boolean} True iff elem is a non-HTML XML node + */ +isXML = Sizzle.isXML = function( elem ) { + // documentElement is verified for cases where it doesn't yet exist + // (such as loading iframes in IE - #4833) + var documentElement = elem && (elem.ownerDocument || elem).documentElement; + return documentElement ? 
documentElement.nodeName !== "HTML" : false; +}; + +/** + * Sets document-related variables once based on the current document + * @param {Element|Object} [doc] An element or document object to use to set the document + * @returns {Object} Returns the current document + */ +setDocument = Sizzle.setDocument = function( node ) { + var hasCompare, + doc = node ? node.ownerDocument || node : preferredDoc, + parent = doc.defaultView; + + // If no document and documentElement is available, return + if ( doc === document || doc.nodeType !== 9 || !doc.documentElement ) { + return document; + } + + // Set our document + document = doc; + docElem = doc.documentElement; + + // Support tests + documentIsHTML = !isXML( doc ); + + // Support: IE>8 + // If iframe document is assigned to "document" variable and if iframe has been reloaded, + // IE will throw "permission denied" error when accessing "document" variable, see jQuery #13936 + // IE6-8 do not support the defaultView property so parent will be undefined + if ( parent && parent !== parent.top ) { + // IE11 does not have attachEvent, so all must suffer + if ( parent.addEventListener ) { + parent.addEventListener( "unload", function() { + setDocument(); + }, false ); + } else if ( parent.attachEvent ) { + parent.attachEvent( "onunload", function() { + setDocument(); + }); + } + } + + /* Attributes + ---------------------------------------------------------------------- */ + + // Support: IE<8 + // Verify that getAttribute really returns attributes and not properties (excepting IE8 booleans) + support.attributes = assert(function( div ) { + div.className = "i"; + return !div.getAttribute("className"); + }); + + /* getElement(s)By* + ---------------------------------------------------------------------- */ + + // Check if getElementsByTagName("*") returns only elements + support.getElementsByTagName = assert(function( div ) { + div.appendChild( doc.createComment("") ); + return !div.getElementsByTagName("*").length; + }); + + // Check if getElementsByClassName can be trusted + support.getElementsByClassName = rnative.test( doc.getElementsByClassName ) && assert(function( div ) { + div.innerHTML = "
<div class='a'></div><div class='a i'></div>
"; + + // Support: Safari<4 + // Catch class over-caching + div.firstChild.className = "i"; + // Support: Opera<10 + // Catch gEBCN failure to find non-leading classes + return div.getElementsByClassName("i").length === 2; + }); + + // Support: IE<10 + // Check if getElementById returns elements by name + // The broken getElementById methods don't pick up programatically-set names, + // so use a roundabout getElementsByName test + support.getById = assert(function( div ) { + docElem.appendChild( div ).id = expando; + return !doc.getElementsByName || !doc.getElementsByName( expando ).length; + }); + + // ID find and filter + if ( support.getById ) { + Expr.find["ID"] = function( id, context ) { + if ( typeof context.getElementById !== strundefined && documentIsHTML ) { + var m = context.getElementById( id ); + // Check parentNode to catch when Blackberry 4.6 returns + // nodes that are no longer in the document #6963 + return m && m.parentNode ? [ m ] : []; + } + }; + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + return elem.getAttribute("id") === attrId; + }; + }; + } else { + // Support: IE6/7 + // getElementById is not reliable as a find shortcut + delete Expr.find["ID"]; + + Expr.filter["ID"] = function( id ) { + var attrId = id.replace( runescape, funescape ); + return function( elem ) { + var node = typeof elem.getAttributeNode !== strundefined && elem.getAttributeNode("id"); + return node && node.value === attrId; + }; + }; + } + + // Tag + Expr.find["TAG"] = support.getElementsByTagName ? + function( tag, context ) { + if ( typeof context.getElementsByTagName !== strundefined ) { + return context.getElementsByTagName( tag ); + } + } : + function( tag, context ) { + var elem, + tmp = [], + i = 0, + results = context.getElementsByTagName( tag ); + + // Filter out possible comments + if ( tag === "*" ) { + while ( (elem = results[i++]) ) { + if ( elem.nodeType === 1 ) { + tmp.push( elem ); + } + } + + return tmp; + } + return results; + }; + + // Class + Expr.find["CLASS"] = support.getElementsByClassName && function( className, context ) { + if ( typeof context.getElementsByClassName !== strundefined && documentIsHTML ) { + return context.getElementsByClassName( className ); + } + }; + + /* QSA/matchesSelector + ---------------------------------------------------------------------- */ + + // QSA and matchesSelector support + + // matchesSelector(:active) reports false when true (IE9/Opera 11.5) + rbuggyMatches = []; + + // qSa(:focus) reports false when true (Chrome 21) + // We allow this because of a bug in IE8/9 that throws an error + // whenever `document.activeElement` is accessed on an iframe + // So, we allow :focus to pass through QSA all the time to avoid the IE error + // See http://bugs.jquery.com/ticket/13378 + rbuggyQSA = []; + + if ( (support.qsa = rnative.test( doc.querySelectorAll )) ) { + // Build QSA regex + // Regex strategy adopted from Diego Perini + assert(function( div ) { + // Select is set to empty string on purpose + // This is to test IE's treatment of not explicitly + // setting a boolean content attribute, + // since its presence should be enough + // http://bugs.jquery.com/ticket/12359 + div.innerHTML = ""; + + // Support: IE8, Opera 11-12.16 + // Nothing should be selected when empty strings follow ^= or $= or *= + // The test attribute must be unknown in Opera but "safe" for WinRT + // http://msdn.microsoft.com/en-us/library/ie/hh465388.aspx#attribute_section + if ( 
div.querySelectorAll("[msallowclip^='']").length ) { + rbuggyQSA.push( "[*^$]=" + whitespace + "*(?:''|\"\")" ); + } + + // Support: IE8 + // Boolean attributes and "value" are not treated correctly + if ( !div.querySelectorAll("[selected]").length ) { + rbuggyQSA.push( "\\[" + whitespace + "*(?:value|" + booleans + ")" ); + } + + // Webkit/Opera - :checked should return selected option elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":checked").length ) { + rbuggyQSA.push(":checked"); + } + }); + + assert(function( div ) { + // Support: Windows 8 Native Apps + // The type and name attributes are restricted during .innerHTML assignment + var input = doc.createElement("input"); + input.setAttribute( "type", "hidden" ); + div.appendChild( input ).setAttribute( "name", "D" ); + + // Support: IE8 + // Enforce case-sensitivity of name attribute + if ( div.querySelectorAll("[name=d]").length ) { + rbuggyQSA.push( "name" + whitespace + "*[*^$|!~]?=" ); + } + + // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) + // IE8 throws error here and will not see later tests + if ( !div.querySelectorAll(":enabled").length ) { + rbuggyQSA.push( ":enabled", ":disabled" ); + } + + // Opera 10-11 does not throw on post-comma invalid pseudos + div.querySelectorAll("*,:x"); + rbuggyQSA.push(",.*:"); + }); + } + + if ( (support.matchesSelector = rnative.test( (matches = docElem.matches || + docElem.webkitMatchesSelector || + docElem.mozMatchesSelector || + docElem.oMatchesSelector || + docElem.msMatchesSelector) )) ) { + + assert(function( div ) { + // Check to see if it's possible to do matchesSelector + // on a disconnected node (IE 9) + support.disconnectedMatch = matches.call( div, "div" ); + + // This should fail with an exception + // Gecko does not error, returns false instead + matches.call( div, "[s!='']:x" ); + rbuggyMatches.push( "!=", pseudos ); + }); + } + + rbuggyQSA = rbuggyQSA.length && new RegExp( rbuggyQSA.join("|") ); + rbuggyMatches = rbuggyMatches.length && new RegExp( rbuggyMatches.join("|") ); + + /* Contains + ---------------------------------------------------------------------- */ + hasCompare = rnative.test( docElem.compareDocumentPosition ); + + // Element contains another + // Purposefully does not implement inclusive descendent + // As in, an element does not contain itself + contains = hasCompare || rnative.test( docElem.contains ) ? + function( a, b ) { + var adown = a.nodeType === 9 ? a.documentElement : a, + bup = b && b.parentNode; + return a === bup || !!( bup && bup.nodeType === 1 && ( + adown.contains ? + adown.contains( bup ) : + a.compareDocumentPosition && a.compareDocumentPosition( bup ) & 16 + )); + } : + function( a, b ) { + if ( b ) { + while ( (b = b.parentNode) ) { + if ( b === a ) { + return true; + } + } + } + return false; + }; + + /* Sorting + ---------------------------------------------------------------------- */ + + // Document order sorting + sortOrder = hasCompare ? + function( a, b ) { + + // Flag for duplicate removal + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + // Sort on method existence if only one input has compareDocumentPosition + var compare = !a.compareDocumentPosition - !b.compareDocumentPosition; + if ( compare ) { + return compare; + } + + // Calculate position if both inputs belong to the same document + compare = ( a.ownerDocument || a ) === ( b.ownerDocument || b ) ? 
+ a.compareDocumentPosition( b ) : + + // Otherwise we know they are disconnected + 1; + + // Disconnected nodes + if ( compare & 1 || + (!support.sortDetached && b.compareDocumentPosition( a ) === compare) ) { + + // Choose the first element that is related to our preferred document + if ( a === doc || a.ownerDocument === preferredDoc && contains(preferredDoc, a) ) { + return -1; + } + if ( b === doc || b.ownerDocument === preferredDoc && contains(preferredDoc, b) ) { + return 1; + } + + // Maintain original order + return sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + } + + return compare & 4 ? -1 : 1; + } : + function( a, b ) { + // Exit early if the nodes are identical + if ( a === b ) { + hasDuplicate = true; + return 0; + } + + var cur, + i = 0, + aup = a.parentNode, + bup = b.parentNode, + ap = [ a ], + bp = [ b ]; + + // Parentless nodes are either documents or disconnected + if ( !aup || !bup ) { + return a === doc ? -1 : + b === doc ? 1 : + aup ? -1 : + bup ? 1 : + sortInput ? + ( indexOf.call( sortInput, a ) - indexOf.call( sortInput, b ) ) : + 0; + + // If the nodes are siblings, we can do a quick check + } else if ( aup === bup ) { + return siblingCheck( a, b ); + } + + // Otherwise we need full lists of their ancestors for comparison + cur = a; + while ( (cur = cur.parentNode) ) { + ap.unshift( cur ); + } + cur = b; + while ( (cur = cur.parentNode) ) { + bp.unshift( cur ); + } + + // Walk down the tree looking for a discrepancy + while ( ap[i] === bp[i] ) { + i++; + } + + return i ? + // Do a sibling check if the nodes have a common ancestor + siblingCheck( ap[i], bp[i] ) : + + // Otherwise nodes in our document sort first + ap[i] === preferredDoc ? -1 : + bp[i] === preferredDoc ? 1 : + 0; + }; + + return doc; +}; + +Sizzle.matches = function( expr, elements ) { + return Sizzle( expr, null, null, elements ); +}; + +Sizzle.matchesSelector = function( elem, expr ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + // Make sure that attribute selectors are quoted + expr = expr.replace( rattributeQuotes, "='$1']" ); + + if ( support.matchesSelector && documentIsHTML && + ( !rbuggyMatches || !rbuggyMatches.test( expr ) ) && + ( !rbuggyQSA || !rbuggyQSA.test( expr ) ) ) { + + try { + var ret = matches.call( elem, expr ); + + // IE 9's matchesSelector returns false on disconnected nodes + if ( ret || support.disconnectedMatch || + // As well, disconnected nodes are said to be in a document + // fragment in IE 9 + elem.document && elem.document.nodeType !== 11 ) { + return ret; + } + } catch(e) {} + } + + return Sizzle( expr, document, null, [ elem ] ).length > 0; +}; + +Sizzle.contains = function( context, elem ) { + // Set document vars if needed + if ( ( context.ownerDocument || context ) !== document ) { + setDocument( context ); + } + return contains( context, elem ); +}; + +Sizzle.attr = function( elem, name ) { + // Set document vars if needed + if ( ( elem.ownerDocument || elem ) !== document ) { + setDocument( elem ); + } + + var fn = Expr.attrHandle[ name.toLowerCase() ], + // Don't get fooled by Object.prototype properties (jQuery #13807) + val = fn && hasOwn.call( Expr.attrHandle, name.toLowerCase() ) ? + fn( elem, name, !documentIsHTML ) : + undefined; + + return val !== undefined ? + val : + support.attributes || !documentIsHTML ? + elem.getAttribute( name ) : + (val = elem.getAttributeNode(name)) && val.specified ? 
+ val.value : + null; +}; + +Sizzle.error = function( msg ) { + throw new Error( "Syntax error, unrecognized expression: " + msg ); +}; + +/** + * Document sorting and removing duplicates + * @param {ArrayLike} results + */ +Sizzle.uniqueSort = function( results ) { + var elem, + duplicates = [], + j = 0, + i = 0; + + // Unless we *know* we can detect duplicates, assume their presence + hasDuplicate = !support.detectDuplicates; + sortInput = !support.sortStable && results.slice( 0 ); + results.sort( sortOrder ); + + if ( hasDuplicate ) { + while ( (elem = results[i++]) ) { + if ( elem === results[ i ] ) { + j = duplicates.push( i ); + } + } + while ( j-- ) { + results.splice( duplicates[ j ], 1 ); + } + } + + // Clear input after sorting to release objects + // See https://github.com/jquery/sizzle/pull/225 + sortInput = null; + + return results; +}; + +/** + * Utility function for retrieving the text value of an array of DOM nodes + * @param {Array|Element} elem + */ +getText = Sizzle.getText = function( elem ) { + var node, + ret = "", + i = 0, + nodeType = elem.nodeType; + + if ( !nodeType ) { + // If no nodeType, this is expected to be an array + while ( (node = elem[i++]) ) { + // Do not traverse comment nodes + ret += getText( node ); + } + } else if ( nodeType === 1 || nodeType === 9 || nodeType === 11 ) { + // Use textContent for elements + // innerText usage removed for consistency of new lines (jQuery #11153) + if ( typeof elem.textContent === "string" ) { + return elem.textContent; + } else { + // Traverse its children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + ret += getText( elem ); + } + } + } else if ( nodeType === 3 || nodeType === 4 ) { + return elem.nodeValue; + } + // Do not include comment or processing instruction nodes + + return ret; +}; + +Expr = Sizzle.selectors = { + + // Can be adjusted by the user + cacheLength: 50, + + createPseudo: markFunction, + + match: matchExpr, + + attrHandle: {}, + + find: {}, + + relative: { + ">": { dir: "parentNode", first: true }, + " ": { dir: "parentNode" }, + "+": { dir: "previousSibling", first: true }, + "~": { dir: "previousSibling" } + }, + + preFilter: { + "ATTR": function( match ) { + match[1] = match[1].replace( runescape, funescape ); + + // Move the given value to match[3] whether quoted or unquoted + match[3] = ( match[3] || match[4] || match[5] || "" ).replace( runescape, funescape ); + + if ( match[2] === "~=" ) { + match[3] = " " + match[3] + " "; + } + + return match.slice( 0, 4 ); + }, + + "CHILD": function( match ) { + /* matches from matchExpr["CHILD"] + 1 type (only|nth|...) + 2 what (child|of-type) + 3 argument (even|odd|\d*|\d*n([+-]\d+)?|...) + 4 xn-component of xn+y argument ([+-]?\d*n|) + 5 sign of xn-component + 6 x of xn-component + 7 sign of y-component + 8 y of y-component + */ + match[1] = match[1].toLowerCase(); + + if ( match[1].slice( 0, 3 ) === "nth" ) { + // nth-* requires argument + if ( !match[3] ) { + Sizzle.error( match[0] ); + } + + // numeric x and y parameters for Expr.filter.CHILD + // remember that false/true cast respectively to 0/1 + match[4] = +( match[4] ? 
match[5] + (match[6] || 1) : 2 * ( match[3] === "even" || match[3] === "odd" ) ); + match[5] = +( ( match[7] + match[8] ) || match[3] === "odd" ); + + // other types prohibit arguments + } else if ( match[3] ) { + Sizzle.error( match[0] ); + } + + return match; + }, + + "PSEUDO": function( match ) { + var excess, + unquoted = !match[6] && match[2]; + + if ( matchExpr["CHILD"].test( match[0] ) ) { + return null; + } + + // Accept quoted arguments as-is + if ( match[3] ) { + match[2] = match[4] || match[5] || ""; + + // Strip excess characters from unquoted arguments + } else if ( unquoted && rpseudo.test( unquoted ) && + // Get excess from tokenize (recursively) + (excess = tokenize( unquoted, true )) && + // advance to the next closing parenthesis + (excess = unquoted.indexOf( ")", unquoted.length - excess ) - unquoted.length) ) { + + // excess is a negative index + match[0] = match[0].slice( 0, excess ); + match[2] = unquoted.slice( 0, excess ); + } + + // Return only captures needed by the pseudo filter method (type and argument) + return match.slice( 0, 3 ); + } + }, + + filter: { + + "TAG": function( nodeNameSelector ) { + var nodeName = nodeNameSelector.replace( runescape, funescape ).toLowerCase(); + return nodeNameSelector === "*" ? + function() { return true; } : + function( elem ) { + return elem.nodeName && elem.nodeName.toLowerCase() === nodeName; + }; + }, + + "CLASS": function( className ) { + var pattern = classCache[ className + " " ]; + + return pattern || + (pattern = new RegExp( "(^|" + whitespace + ")" + className + "(" + whitespace + "|$)" )) && + classCache( className, function( elem ) { + return pattern.test( typeof elem.className === "string" && elem.className || typeof elem.getAttribute !== strundefined && elem.getAttribute("class") || "" ); + }); + }, + + "ATTR": function( name, operator, check ) { + return function( elem ) { + var result = Sizzle.attr( elem, name ); + + if ( result == null ) { + return operator === "!="; + } + if ( !operator ) { + return true; + } + + result += ""; + + return operator === "=" ? result === check : + operator === "!=" ? result !== check : + operator === "^=" ? check && result.indexOf( check ) === 0 : + operator === "*=" ? check && result.indexOf( check ) > -1 : + operator === "$=" ? check && result.slice( -check.length ) === check : + operator === "~=" ? ( " " + result + " " ).indexOf( check ) > -1 : + operator === "|=" ? result === check || result.slice( 0, check.length + 1 ) === check + "-" : + false; + }; + }, + + "CHILD": function( type, what, argument, first, last ) { + var simple = type.slice( 0, 3 ) !== "nth", + forward = type.slice( -4 ) !== "last", + ofType = what === "of-type"; + + return first === 1 && last === 0 ? + + // Shortcut for :nth-*(n) + function( elem ) { + return !!elem.parentNode; + } : + + function( elem, context, xml ) { + var cache, outerCache, node, diff, nodeIndex, start, + dir = simple !== forward ? "nextSibling" : "previousSibling", + parent = elem.parentNode, + name = ofType && elem.nodeName.toLowerCase(), + useCache = !xml && !ofType; + + if ( parent ) { + + // :(first|last|only)-(child|of-type) + if ( simple ) { + while ( dir ) { + node = elem; + while ( (node = node[ dir ]) ) { + if ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) { + return false; + } + } + // Reverse direction for :only-* (if we haven't yet done so) + start = dir = type === "only" && !start && "nextSibling"; + } + return true; + } + + start = [ forward ? 
parent.firstChild : parent.lastChild ]; + + // non-xml :nth-child(...) stores cache data on `parent` + if ( forward && useCache ) { + // Seek `elem` from a previously-cached index + outerCache = parent[ expando ] || (parent[ expando ] = {}); + cache = outerCache[ type ] || []; + nodeIndex = cache[0] === dirruns && cache[1]; + diff = cache[0] === dirruns && cache[2]; + node = nodeIndex && parent.childNodes[ nodeIndex ]; + + while ( (node = ++nodeIndex && node && node[ dir ] || + + // Fallback to seeking `elem` from the start + (diff = nodeIndex = 0) || start.pop()) ) { + + // When found, cache indexes on `parent` and break + if ( node.nodeType === 1 && ++diff && node === elem ) { + outerCache[ type ] = [ dirruns, nodeIndex, diff ]; + break; + } + } + + // Use previously-cached element index if available + } else if ( useCache && (cache = (elem[ expando ] || (elem[ expando ] = {}))[ type ]) && cache[0] === dirruns ) { + diff = cache[1]; + + // xml :nth-child(...) or :nth-last-child(...) or :nth(-last)?-of-type(...) + } else { + // Use the same loop as above to seek `elem` from the start + while ( (node = ++nodeIndex && node && node[ dir ] || + (diff = nodeIndex = 0) || start.pop()) ) { + + if ( ( ofType ? node.nodeName.toLowerCase() === name : node.nodeType === 1 ) && ++diff ) { + // Cache the index of each encountered element + if ( useCache ) { + (node[ expando ] || (node[ expando ] = {}))[ type ] = [ dirruns, diff ]; + } + + if ( node === elem ) { + break; + } + } + } + } + + // Incorporate the offset, then check against cycle size + diff -= last; + return diff === first || ( diff % first === 0 && diff / first >= 0 ); + } + }; + }, + + "PSEUDO": function( pseudo, argument ) { + // pseudo-class names are case-insensitive + // http://www.w3.org/TR/selectors/#pseudo-classes + // Prioritize by case sensitivity in case custom pseudos are added with uppercase letters + // Remember that setFilters inherits from pseudos + var args, + fn = Expr.pseudos[ pseudo ] || Expr.setFilters[ pseudo.toLowerCase() ] || + Sizzle.error( "unsupported pseudo: " + pseudo ); + + // The user may use createPseudo to indicate that + // arguments are needed to create the filter function + // just as Sizzle does + if ( fn[ expando ] ) { + return fn( argument ); + } + + // But maintain support for old signatures + if ( fn.length > 1 ) { + args = [ pseudo, pseudo, "", argument ]; + return Expr.setFilters.hasOwnProperty( pseudo.toLowerCase() ) ? + markFunction(function( seed, matches ) { + var idx, + matched = fn( seed, argument ), + i = matched.length; + while ( i-- ) { + idx = indexOf.call( seed, matched[i] ); + seed[ idx ] = !( matches[ idx ] = matched[i] ); + } + }) : + function( elem ) { + return fn( elem, 0, args ); + }; + } + + return fn; + } + }, + + pseudos: { + // Potentially complex pseudos + "not": markFunction(function( selector ) { + // Trim the selector passed to compile + // to avoid treating leading and trailing + // spaces as combinators + var input = [], + results = [], + matcher = compile( selector.replace( rtrim, "$1" ) ); + + return matcher[ expando ] ? 
+ markFunction(function( seed, matches, context, xml ) { + var elem, + unmatched = matcher( seed, null, xml, [] ), + i = seed.length; + + // Match elements unmatched by `matcher` + while ( i-- ) { + if ( (elem = unmatched[i]) ) { + seed[i] = !(matches[i] = elem); + } + } + }) : + function( elem, context, xml ) { + input[0] = elem; + matcher( input, null, xml, results ); + return !results.pop(); + }; + }), + + "has": markFunction(function( selector ) { + return function( elem ) { + return Sizzle( selector, elem ).length > 0; + }; + }), + + "contains": markFunction(function( text ) { + return function( elem ) { + return ( elem.textContent || elem.innerText || getText( elem ) ).indexOf( text ) > -1; + }; + }), + + // "Whether an element is represented by a :lang() selector + // is based solely on the element's language value + // being equal to the identifier C, + // or beginning with the identifier C immediately followed by "-". + // The matching of C against the element's language value is performed case-insensitively. + // The identifier C does not have to be a valid language name." + // http://www.w3.org/TR/selectors/#lang-pseudo + "lang": markFunction( function( lang ) { + // lang value must be a valid identifier + if ( !ridentifier.test(lang || "") ) { + Sizzle.error( "unsupported lang: " + lang ); + } + lang = lang.replace( runescape, funescape ).toLowerCase(); + return function( elem ) { + var elemLang; + do { + if ( (elemLang = documentIsHTML ? + elem.lang : + elem.getAttribute("xml:lang") || elem.getAttribute("lang")) ) { + + elemLang = elemLang.toLowerCase(); + return elemLang === lang || elemLang.indexOf( lang + "-" ) === 0; + } + } while ( (elem = elem.parentNode) && elem.nodeType === 1 ); + return false; + }; + }), + + // Miscellaneous + "target": function( elem ) { + var hash = window.location && window.location.hash; + return hash && hash.slice( 1 ) === elem.id; + }, + + "root": function( elem ) { + return elem === docElem; + }, + + "focus": function( elem ) { + return elem === document.activeElement && (!document.hasFocus || document.hasFocus()) && !!(elem.type || elem.href || ~elem.tabIndex); + }, + + // Boolean properties + "enabled": function( elem ) { + return elem.disabled === false; + }, + + "disabled": function( elem ) { + return elem.disabled === true; + }, + + "checked": function( elem ) { + // In CSS3, :checked should return both checked and selected elements + // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked + var nodeName = elem.nodeName.toLowerCase(); + return (nodeName === "input" && !!elem.checked) || (nodeName === "option" && !!elem.selected); + }, + + "selected": function( elem ) { + // Accessing this property makes selected-by-default + // options in Safari work properly + if ( elem.parentNode ) { + elem.parentNode.selectedIndex; + } + + return elem.selected === true; + }, + + // Contents + "empty": function( elem ) { + // http://www.w3.org/TR/selectors/#empty-pseudo + // :empty is negated by element (1) or content nodes (text: 3; cdata: 4; entity ref: 5), + // but not by others (comment: 8; processing instruction: 7; etc.) 
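+ // Concretely: <div><!-- note --></div> still matches :empty, while
+ // <div> </div> does not (the whitespace is a text node, nodeType 3).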
+ // nodeType < 6 works because attributes (2) do not appear as children + for ( elem = elem.firstChild; elem; elem = elem.nextSibling ) { + if ( elem.nodeType < 6 ) { + return false; + } + } + return true; + }, + + "parent": function( elem ) { + return !Expr.pseudos["empty"]( elem ); + }, + + // Element/input types + "header": function( elem ) { + return rheader.test( elem.nodeName ); + }, + + "input": function( elem ) { + return rinputs.test( elem.nodeName ); + }, + + "button": function( elem ) { + var name = elem.nodeName.toLowerCase(); + return name === "input" && elem.type === "button" || name === "button"; + }, + + "text": function( elem ) { + var attr; + return elem.nodeName.toLowerCase() === "input" && + elem.type === "text" && + + // Support: IE<8 + // New HTML5 attribute values (e.g., "search") appear with elem.type === "text" + ( (attr = elem.getAttribute("type")) == null || attr.toLowerCase() === "text" ); + }, + + // Position-in-collection + "first": createPositionalPseudo(function() { + return [ 0 ]; + }), + + "last": createPositionalPseudo(function( matchIndexes, length ) { + return [ length - 1 ]; + }), + + "eq": createPositionalPseudo(function( matchIndexes, length, argument ) { + return [ argument < 0 ? argument + length : argument ]; + }), + + "even": createPositionalPseudo(function( matchIndexes, length ) { + var i = 0; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "odd": createPositionalPseudo(function( matchIndexes, length ) { + var i = 1; + for ( ; i < length; i += 2 ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "lt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; --i >= 0; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }), + + "gt": createPositionalPseudo(function( matchIndexes, length, argument ) { + var i = argument < 0 ? argument + length : argument; + for ( ; ++i < length; ) { + matchIndexes.push( i ); + } + return matchIndexes; + }) + } +}; + +Expr.pseudos["nth"] = Expr.pseudos["eq"]; + +// Add button/input type pseudos +for ( i in { radio: true, checkbox: true, file: true, password: true, image: true } ) { + Expr.pseudos[ i ] = createInputPseudo( i ); +} +for ( i in { submit: true, reset: true } ) { + Expr.pseudos[ i ] = createButtonPseudo( i ); +} + +// Easy API for creating new setFilters +function setFilters() {} +setFilters.prototype = Expr.filters = Expr.pseudos; +Expr.setFilters = new setFilters(); + +tokenize = Sizzle.tokenize = function( selector, parseOnly ) { + var matched, match, tokens, type, + soFar, groups, preFilters, + cached = tokenCache[ selector + " " ]; + + if ( cached ) { + return parseOnly ? 
0 : cached.slice( 0 ); + } + + soFar = selector; + groups = []; + preFilters = Expr.preFilter; + + while ( soFar ) { + + // Comma and first run + if ( !matched || (match = rcomma.exec( soFar )) ) { + if ( match ) { + // Don't consume trailing commas as valid + soFar = soFar.slice( match[0].length ) || soFar; + } + groups.push( (tokens = []) ); + } + + matched = false; + + // Combinators + if ( (match = rcombinators.exec( soFar )) ) { + matched = match.shift(); + tokens.push({ + value: matched, + // Cast descendant combinators to space + type: match[0].replace( rtrim, " " ) + }); + soFar = soFar.slice( matched.length ); + } + + // Filters + for ( type in Expr.filter ) { + if ( (match = matchExpr[ type ].exec( soFar )) && (!preFilters[ type ] || + (match = preFilters[ type ]( match ))) ) { + matched = match.shift(); + tokens.push({ + value: matched, + type: type, + matches: match + }); + soFar = soFar.slice( matched.length ); + } + } + + if ( !matched ) { + break; + } + } + + // Return the length of the invalid excess + // if we're just parsing + // Otherwise, throw an error or return tokens + return parseOnly ? + soFar.length : + soFar ? + Sizzle.error( selector ) : + // Cache the tokens + tokenCache( selector, groups ).slice( 0 ); +}; + +function toSelector( tokens ) { + var i = 0, + len = tokens.length, + selector = ""; + for ( ; i < len; i++ ) { + selector += tokens[i].value; + } + return selector; +} + +function addCombinator( matcher, combinator, base ) { + var dir = combinator.dir, + checkNonElements = base && dir === "parentNode", + doneName = done++; + + return combinator.first ? + // Check against closest ancestor/preceding element + function( elem, context, xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + return matcher( elem, context, xml ); + } + } + } : + + // Check against all ancestor/preceding elements + function( elem, context, xml ) { + var oldCache, outerCache, + newCache = [ dirruns, doneName ]; + + // We can't set arbitrary data on XML nodes, so they don't benefit from dir caching + if ( xml ) { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + if ( matcher( elem, context, xml ) ) { + return true; + } + } + } + } else { + while ( (elem = elem[ dir ]) ) { + if ( elem.nodeType === 1 || checkNonElements ) { + outerCache = elem[ expando ] || (elem[ expando ] = {}); + if ( (oldCache = outerCache[ dir ]) && + oldCache[ 0 ] === dirruns && oldCache[ 1 ] === doneName ) { + + // Assign to newCache so results back-propagate to previous elements + return (newCache[ 2 ] = oldCache[ 2 ]); + } else { + // Reuse newcache so results back-propagate to previous elements + outerCache[ dir ] = newCache; + + // A match means we're done; a fail means we have to keep checking + if ( (newCache[ 2 ] = matcher( elem, context, xml )) ) { + return true; + } + } + } + } + } + }; +} + +function elementMatcher( matchers ) { + return matchers.length > 1 ? 
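+ // Illustrative note (a sketch, not upstream code): elementMatcher
+ // AND-combines the per-token predicates of one compound selector, so for
+ // "div.item[data-x]" the list is roughly [ TAG, CLASS, ATTR ] and an
+ // element matches only if all three predicates return true.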
+ function( elem, context, xml ) { + var i = matchers.length; + while ( i-- ) { + if ( !matchers[i]( elem, context, xml ) ) { + return false; + } + } + return true; + } : + matchers[0]; +} + +function multipleContexts( selector, contexts, results ) { + var i = 0, + len = contexts.length; + for ( ; i < len; i++ ) { + Sizzle( selector, contexts[i], results ); + } + return results; +} + +function condense( unmatched, map, filter, context, xml ) { + var elem, + newUnmatched = [], + i = 0, + len = unmatched.length, + mapped = map != null; + + for ( ; i < len; i++ ) { + if ( (elem = unmatched[i]) ) { + if ( !filter || filter( elem, context, xml ) ) { + newUnmatched.push( elem ); + if ( mapped ) { + map.push( i ); + } + } + } + } + + return newUnmatched; +} + +function setMatcher( preFilter, selector, matcher, postFilter, postFinder, postSelector ) { + if ( postFilter && !postFilter[ expando ] ) { + postFilter = setMatcher( postFilter ); + } + if ( postFinder && !postFinder[ expando ] ) { + postFinder = setMatcher( postFinder, postSelector ); + } + return markFunction(function( seed, results, context, xml ) { + var temp, i, elem, + preMap = [], + postMap = [], + preexisting = results.length, + + // Get initial elements from seed or context + elems = seed || multipleContexts( selector || "*", context.nodeType ? [ context ] : context, [] ), + + // Prefilter to get matcher input, preserving a map for seed-results synchronization + matcherIn = preFilter && ( seed || !selector ) ? + condense( elems, preMap, preFilter, context, xml ) : + elems, + + matcherOut = matcher ? + // If we have a postFinder, or filtered seed, or non-seed postFilter or preexisting results, + postFinder || ( seed ? preFilter : preexisting || postFilter ) ? + + // ...intermediate processing is necessary + [] : + + // ...otherwise use results directly + results : + matcherIn; + + // Find primary matches + if ( matcher ) { + matcher( matcherIn, matcherOut, context, xml ); + } + + // Apply postFilter + if ( postFilter ) { + temp = condense( matcherOut, postMap ); + postFilter( temp, [], context, xml ); + + // Un-match failing elements by moving them back to matcherIn + i = temp.length; + while ( i-- ) { + if ( (elem = temp[i]) ) { + matcherOut[ postMap[i] ] = !(matcherIn[ postMap[i] ] = elem); + } + } + } + + if ( seed ) { + if ( postFinder || preFilter ) { + if ( postFinder ) { + // Get the final matcherOut by condensing this intermediate into postFinder contexts + temp = []; + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) ) { + // Restore matcherIn since elem is not yet a final match + temp.push( (matcherIn[i] = elem) ); + } + } + postFinder( null, (matcherOut = []), temp, xml ); + } + + // Move matched elements from seed to results to keep them synchronized + i = matcherOut.length; + while ( i-- ) { + if ( (elem = matcherOut[i]) && + (temp = postFinder ? indexOf.call( seed, elem ) : preMap[i]) > -1 ) { + + seed[temp] = !(results[temp] = elem); + } + } + } + + // Add elements to results, through postFinder if defined + } else { + matcherOut = condense( + matcherOut === results ? 
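+ // Illustrative note (hypothetical data, not upstream code): condense()
+ // keeps the elements that pass `filter` and records their original seed
+ // positions in `map`, e.g. condense( [ a, b, c ], preMap, isDiv ) may
+ // return [ a, c ] with preMap === [ 0, 2 ], so later passes can write
+ // results back into the matching seed slots.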
+ matcherOut.splice( preexisting, matcherOut.length ) : + matcherOut + ); + if ( postFinder ) { + postFinder( null, results, matcherOut, xml ); + } else { + push.apply( results, matcherOut ); + } + } + }); +} + +function matcherFromTokens( tokens ) { + var checkContext, matcher, j, + len = tokens.length, + leadingRelative = Expr.relative[ tokens[0].type ], + implicitRelative = leadingRelative || Expr.relative[" "], + i = leadingRelative ? 1 : 0, + + // The foundational matcher ensures that elements are reachable from top-level context(s) + matchContext = addCombinator( function( elem ) { + return elem === checkContext; + }, implicitRelative, true ), + matchAnyContext = addCombinator( function( elem ) { + return indexOf.call( checkContext, elem ) > -1; + }, implicitRelative, true ), + matchers = [ function( elem, context, xml ) { + return ( !leadingRelative && ( xml || context !== outermostContext ) ) || ( + (checkContext = context).nodeType ? + matchContext( elem, context, xml ) : + matchAnyContext( elem, context, xml ) ); + } ]; + + for ( ; i < len; i++ ) { + if ( (matcher = Expr.relative[ tokens[i].type ]) ) { + matchers = [ addCombinator(elementMatcher( matchers ), matcher) ]; + } else { + matcher = Expr.filter[ tokens[i].type ].apply( null, tokens[i].matches ); + + // Return special upon seeing a positional matcher + if ( matcher[ expando ] ) { + // Find the next relative operator (if any) for proper handling + j = ++i; + for ( ; j < len; j++ ) { + if ( Expr.relative[ tokens[j].type ] ) { + break; + } + } + return setMatcher( + i > 1 && elementMatcher( matchers ), + i > 1 && toSelector( + // If the preceding token was a descendant combinator, insert an implicit any-element `*` + tokens.slice( 0, i - 1 ).concat({ value: tokens[ i - 2 ].type === " " ? "*" : "" }) + ).replace( rtrim, "$1" ), + matcher, + i < j && matcherFromTokens( tokens.slice( i, j ) ), + j < len && matcherFromTokens( (tokens = tokens.slice( j )) ), + j < len && toSelector( tokens ) + ); + } + matchers.push( matcher ); + } + } + + return elementMatcher( matchers ); +} + +function matcherFromGroupMatchers( elementMatchers, setMatchers ) { + var bySet = setMatchers.length > 0, + byElement = elementMatchers.length > 0, + superMatcher = function( seed, context, xml, results, outermost ) { + var elem, j, matcher, + matchedCount = 0, + i = "0", + unmatched = seed && [], + setMatched = [], + contextBackup = outermostContext, + // We must always have either seed elements or outermost context + elems = seed || byElement && Expr.find["TAG"]( "*", outermost ), + // Use integer dirruns iff this is the outermost matcher + dirrunsUnique = (dirruns += contextBackup == null ? 
1 : Math.random() || 0.1),
+ len = elems.length;
+
+ if ( outermost ) {
+ outermostContext = context !== document && context;
+ }
+
+ // Add elements passing elementMatchers directly to results
+ // Keep `i` a string if there are no elements so `matchedCount` will be "00" below
+ // Support: IE<9, Safari
+ // Tolerate NodeList properties (IE: "length"; Safari: <number>) matching elements by id
+ for ( ; i !== len && (elem = elems[i]) != null; i++ ) {
+ if ( byElement && elem ) {
+ j = 0;
+ while ( (matcher = elementMatchers[j++]) ) {
+ if ( matcher( elem, context, xml ) ) {
+ results.push( elem );
+ break;
+ }
+ }
+ if ( outermost ) {
+ dirruns = dirrunsUnique;
+ }
+ }
+
+ // Track unmatched elements for set filters
+ if ( bySet ) {
+ // They will have gone through all possible matchers
+ if ( (elem = !matcher && elem) ) {
+ matchedCount--;
+ }
+
+ // Lengthen the array for every element, matched or not
+ if ( seed ) {
+ unmatched.push( elem );
+ }
+ }
+ }
+
+ // Apply set filters to unmatched elements
+ matchedCount += i;
+ if ( bySet && i !== matchedCount ) {
+ j = 0;
+ while ( (matcher = setMatchers[j++]) ) {
+ matcher( unmatched, setMatched, context, xml );
+ }
+
+ if ( seed ) {
+ // Reintegrate element matches to eliminate the need for sorting
+ if ( matchedCount > 0 ) {
+ while ( i-- ) {
+ if ( !(unmatched[i] || setMatched[i]) ) {
+ setMatched[i] = pop.call( results );
+ }
+ }
+ }
+
+ // Discard index placeholder values to get only actual matches
+ setMatched = condense( setMatched );
+ }
+
+ // Add matches to results
+ push.apply( results, setMatched );
+
+ // Seedless set matches succeeding multiple successful matchers stipulate sorting
+ if ( outermost && !seed && setMatched.length > 0 &&
+ ( matchedCount + setMatchers.length ) > 1 ) {
+
+ Sizzle.uniqueSort( results );
+ }
+ }
+
+ // Override manipulation of globals by nested matchers
+ if ( outermost ) {
+ dirruns = dirrunsUnique;
+ outermostContext = contextBackup;
+ }
+
+ return unmatched;
+ };
+
+ return bySet ?
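+ // Note (added summary, not upstream): plain selectors compile to element
+ // matchers that are checked once per candidate above, while positional
+ // pseudos ( ":first", ":eq", ":lt", ... ) compile to set matchers that
+ // must see the whole collected set at once -- the bySet branch below.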
+ markFunction( superMatcher ) : + superMatcher; +} + +compile = Sizzle.compile = function( selector, match /* Internal Use Only */ ) { + var i, + setMatchers = [], + elementMatchers = [], + cached = compilerCache[ selector + " " ]; + + if ( !cached ) { + // Generate a function of recursive functions that can be used to check each element + if ( !match ) { + match = tokenize( selector ); + } + i = match.length; + while ( i-- ) { + cached = matcherFromTokens( match[i] ); + if ( cached[ expando ] ) { + setMatchers.push( cached ); + } else { + elementMatchers.push( cached ); + } + } + + // Cache the compiled function + cached = compilerCache( selector, matcherFromGroupMatchers( elementMatchers, setMatchers ) ); + + // Save selector and tokenization + cached.selector = selector; + } + return cached; +}; + +/** + * A low-level selection function that works with Sizzle's compiled + * selector functions + * @param {String|Function} selector A selector or a pre-compiled + * selector function built with Sizzle.compile + * @param {Element} context + * @param {Array} [results] + * @param {Array} [seed] A set of elements to match against + */ +select = Sizzle.select = function( selector, context, results, seed ) { + var i, tokens, token, type, find, + compiled = typeof selector === "function" && selector, + match = !seed && tokenize( (selector = compiled.selector || selector) ); + + results = results || []; + + // Try to minimize operations if there is no seed and only one group + if ( match.length === 1 ) { + + // Take a shortcut and set the context if the root selector is an ID + tokens = match[0] = match[0].slice( 0 ); + if ( tokens.length > 2 && (token = tokens[0]).type === "ID" && + support.getById && context.nodeType === 9 && documentIsHTML && + Expr.relative[ tokens[1].type ] ) { + + context = ( Expr.find["ID"]( token.matches[0].replace(runescape, funescape), context ) || [] )[0]; + if ( !context ) { + return results; + + // Precompiled matchers will still verify ancestry, so step up a level + } else if ( compiled ) { + context = context.parentNode; + } + + selector = selector.slice( tokens.shift().value.length ); + } + + // Fetch a seed set for right-to-left matching + i = matchExpr["needsContext"].test( selector ) ? 
0 : tokens.length;
+ while ( i-- ) {
+ token = tokens[i];
+
+ // Abort if we hit a combinator
+ if ( Expr.relative[ (type = token.type) ] ) {
+ break;
+ }
+ if ( (find = Expr.find[ type ]) ) {
+ // Search, expanding context for leading sibling combinators
+ if ( (seed = find(
+ token.matches[0].replace( runescape, funescape ),
+ rsibling.test( tokens[0].type ) && testContext( context.parentNode ) || context
+ )) ) {
+
+ // If seed is empty or no tokens remain, we can return early
+ tokens.splice( i, 1 );
+ selector = seed.length && toSelector( tokens );
+ if ( !selector ) {
+ push.apply( results, seed );
+ return results;
+ }
+
+ break;
+ }
+ }
+ }
+ }
+
+ // Compile and execute a filtering function if one is not provided
+ // Provide `match` to avoid retokenization if we modified the selector above
+ ( compiled || compile( selector, match ) )(
+ seed,
+ context,
+ !documentIsHTML,
+ results,
+ rsibling.test( selector ) && testContext( context.parentNode ) || context
+ );
+ return results;
+ };
+
+ // One-time assignments
+
+ // Sort stability
+ support.sortStable = expando.split("").sort( sortOrder ).join("") === expando;
+
+ // Support: Chrome<14
+ // Always assume duplicates if they aren't passed to the comparison function
+ support.detectDuplicates = !!hasDuplicate;
+
+ // Initialize against the default document
+ setDocument();
+
+ // Support: Webkit<537.32 - Safari 6.0.3/Chrome 25 (fixed in Chrome 27)
+ // Detached nodes confoundingly follow *each other*
+ support.sortDetached = assert(function( div1 ) {
+ // Should return 1, but returns 4 (following)
+ return div1.compareDocumentPosition( document.createElement("div") ) & 1;
+ });
+
+ // Support: IE<8
+ // Prevent attribute/property "interpolation"
+ // http://msdn.microsoft.com/en-us/library/ms536429%28VS.85%29.aspx
+ if ( !assert(function( div ) {
+ div.innerHTML = "<a href='#'></a>";
+ return div.firstChild.getAttribute("href") === "#" ;
+ }) ) {
+ addHandle( "type|href|height|width", function( elem, name, isXML ) {
+ if ( !isXML ) {
+ return elem.getAttribute( name, name.toLowerCase() === "type" ? 1 : 2 );
+ }
+ });
+ }
+
+ // Support: IE<9
+ // Use defaultValue in place of getAttribute("value")
+ if ( !support.attributes || !assert(function( div ) {
+ div.innerHTML = "<input/>";
+ div.firstChild.setAttribute( "value", "" );
+ return div.firstChild.getAttribute( "value" ) === "";
+ }) ) {
+ addHandle( "value", function( elem, name, isXML ) {
+ if ( !isXML && elem.nodeName.toLowerCase() === "input" ) {
+ return elem.defaultValue;
+ }
+ });
+ }
+
+ // Support: IE<9
+ // Use getAttributeNode to fetch booleans when getAttribute lies
+ if ( !assert(function( div ) {
+ return div.getAttribute("disabled") == null;
+ }) ) {
+ addHandle( booleans, function( elem, name, isXML ) {
+ var val;
+ if ( !isXML ) {
+ return elem[ name ] === true ? name.toLowerCase() :
+ (val = elem.getAttributeNode( name )) && val.specified ?
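+ // Note (added, not upstream): in oldIE, getAttribute for booleans can
+ // reflect the property instead of the markup, so a specified attribute
+ // node is treated as the source of truth here; e.g. <input disabled>
+ // reports "disabled", while an absent attribute reports null.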
+ val.value : + null; + } + }); +} + +return Sizzle; + +})( window ); + + + +jQuery.find = Sizzle; +jQuery.expr = Sizzle.selectors; +jQuery.expr[":"] = jQuery.expr.pseudos; +jQuery.unique = Sizzle.uniqueSort; +jQuery.text = Sizzle.getText; +jQuery.isXMLDoc = Sizzle.isXML; +jQuery.contains = Sizzle.contains; + + + +var rneedsContext = jQuery.expr.match.needsContext; + +var rsingleTag = (/^<(\w+)\s*\/?>(?:<\/\1>|)$/); + + + +var risSimple = /^.[^:#\[\.,]*$/; + +// Implement the identical functionality for filter and not +function winnow( elements, qualifier, not ) { + if ( jQuery.isFunction( qualifier ) ) { + return jQuery.grep( elements, function( elem, i ) { + /* jshint -W018 */ + return !!qualifier.call( elem, i, elem ) !== not; + }); + + } + + if ( qualifier.nodeType ) { + return jQuery.grep( elements, function( elem ) { + return ( elem === qualifier ) !== not; + }); + + } + + if ( typeof qualifier === "string" ) { + if ( risSimple.test( qualifier ) ) { + return jQuery.filter( qualifier, elements, not ); + } + + qualifier = jQuery.filter( qualifier, elements ); + } + + return jQuery.grep( elements, function( elem ) { + return ( jQuery.inArray( elem, qualifier ) >= 0 ) !== not; + }); +} + +jQuery.filter = function( expr, elems, not ) { + var elem = elems[ 0 ]; + + if ( not ) { + expr = ":not(" + expr + ")"; + } + + return elems.length === 1 && elem.nodeType === 1 ? + jQuery.find.matchesSelector( elem, expr ) ? [ elem ] : [] : + jQuery.find.matches( expr, jQuery.grep( elems, function( elem ) { + return elem.nodeType === 1; + })); +}; + +jQuery.fn.extend({ + find: function( selector ) { + var i, + ret = [], + self = this, + len = self.length; + + if ( typeof selector !== "string" ) { + return this.pushStack( jQuery( selector ).filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( self[ i ], this ) ) { + return true; + } + } + }) ); + } + + for ( i = 0; i < len; i++ ) { + jQuery.find( selector, self[ i ], ret ); + } + + // Needed because $( selector, context ) becomes $( context ).find( selector ) + ret = this.pushStack( len > 1 ? jQuery.unique( ret ) : ret ); + ret.selector = this.selector ? this.selector + " " + selector : selector; + return ret; + }, + filter: function( selector ) { + return this.pushStack( winnow(this, selector || [], false) ); + }, + not: function( selector ) { + return this.pushStack( winnow(this, selector || [], true) ); + }, + is: function( selector ) { + return !!winnow( + this, + + // If this is a positional/relative selector, check membership in the returned set + // so $("p:first").is("p:last") won't return true for a doc with two "p". + typeof selector === "string" && rneedsContext.test( selector ) ? 
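+ // Note (added usage sketch, not upstream): winnow() accepts a function,
+ // a node, or a selector string, and `not` inverts the test, e.g.:
+ //   jQuery( "li" ).filter( ".done" ) // keeps elements matching ".done"
+ //   jQuery( "li" ).not( ".done" )    // keeps the rest
+ //   jQuery( "li" ).is( ":last" )     // true if any element qualifies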
+ jQuery( selector ) :
+ selector || [],
+ false
+ ).length;
+ }
+ });
+
+
+ // Initialize a jQuery object
+
+
+ // A central reference to the root jQuery(document)
+ var rootjQuery,
+
+ // Use the correct document accordingly with window argument (sandbox)
+ document = window.document,
+
+ // A simple way to check for HTML strings
+ // Prioritize #id over <tag> to avoid XSS via location.hash (#9521)
+ // Strict HTML recognition (#11290: must start with <)
+ rquickExpr = /^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,
+
+ init = jQuery.fn.init = function( selector, context ) {
+ var match, elem;
+
+ // HANDLE: $(""), $(null), $(undefined), $(false)
+ if ( !selector ) {
+ return this;
+ }
+
+ // Handle HTML strings
+ if ( typeof selector === "string" ) {
+ if ( selector.charAt(0) === "<" && selector.charAt( selector.length - 1 ) === ">" && selector.length >= 3 ) {
+ // Assume that strings that start and end with <> are HTML and skip the regex check
+ match = [ null, selector, null ];
+
+ } else {
+ match = rquickExpr.exec( selector );
+ }
+
+ // Match html or make sure no context is specified for #id
+ if ( match && (match[1] || !context) ) {
+
+ // HANDLE: $(html) -> $(array)
+ if ( match[1] ) {
+ context = context instanceof jQuery ? context[0] : context;
+
+ // scripts is true for back-compat
+ // Intentionally let the error be thrown if parseHTML is not present
+ jQuery.merge( this, jQuery.parseHTML(
+ match[1],
+ context && context.nodeType ? context.ownerDocument || context : document,
+ true
+ ) );
+
+ // HANDLE: $(html, props)
+ if ( rsingleTag.test( match[1] ) && jQuery.isPlainObject( context ) ) {
+ for ( match in context ) {
+ // Properties of context are called as methods if possible
+ if ( jQuery.isFunction( this[ match ] ) ) {
+ this[ match ]( context[ match ] );
+
+ // ...and otherwise set as attributes
+ } else {
+ this.attr( match, context[ match ] );
+ }
+ }
+ }
+
+ return this;
+
+ // HANDLE: $(#id)
+ } else {
+ elem = document.getElementById( match[2] );
+
+ // Check parentNode to catch when Blackberry 4.6 returns
+ // nodes that are no longer in the document #6963
+ if ( elem && elem.parentNode ) {
+ // Handle the case where IE and Opera return items
+ // by name instead of ID
+ if ( elem.id !== match[2] ) {
+ return rootjQuery.find( selector );
+ }
+
+ // Otherwise, we inject the element directly into the jQuery object
+ this.length = 1;
+ this[0] = elem;
+ }
+
+ this.context = document;
+ this.selector = selector;
+ return this;
+ }
+
+ // HANDLE: $(expr, $(...))
+ } else if ( !context || context.jquery ) {
+ return ( context || rootjQuery ).find( selector );
+
+ // HANDLE: $(expr, context)
+ // (which is just equivalent to: $(context).find(expr)
+ } else {
+ return this.constructor( context ).find( selector );
+ }
+
+ // HANDLE: $(DOMElement)
+ } else if ( selector.nodeType ) {
+ this.context = this[0] = selector;
+ this.length = 1;
+ return this;
+
+ // HANDLE: $(function)
+ // Shortcut for document ready
+ } else if ( jQuery.isFunction( selector ) ) {
+ return typeof rootjQuery.ready !== "undefined" ?
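+ // Note (added summary, not upstream): the branches above give jQuery()
+ // its polymorphic entry points, e.g.:
+ //   jQuery( "<div/>", { "class": "box", click: fn } ) // parseHTML + props
+ //   jQuery( "#main" )           // getElementById fast path
+ //   jQuery( ".item", ctx )      // becomes jQuery( ctx ).find( ".item" )
+ //   jQuery( function( $ ) {} )  // document-ready shorthand handled below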
+ rootjQuery.ready( selector ) : + // Execute immediately if ready is not present + selector( jQuery ); + } + + if ( selector.selector !== undefined ) { + this.selector = selector.selector; + this.context = selector.context; + } + + return jQuery.makeArray( selector, this ); + }; + +// Give the init function the jQuery prototype for later instantiation +init.prototype = jQuery.fn; + +// Initialize central reference +rootjQuery = jQuery( document ); + + +var rparentsprev = /^(?:parents|prev(?:Until|All))/, + // methods guaranteed to produce a unique set when starting from a unique set + guaranteedUnique = { + children: true, + contents: true, + next: true, + prev: true + }; + +jQuery.extend({ + dir: function( elem, dir, until ) { + var matched = [], + cur = elem[ dir ]; + + while ( cur && cur.nodeType !== 9 && (until === undefined || cur.nodeType !== 1 || !jQuery( cur ).is( until )) ) { + if ( cur.nodeType === 1 ) { + matched.push( cur ); + } + cur = cur[dir]; + } + return matched; + }, + + sibling: function( n, elem ) { + var r = []; + + for ( ; n; n = n.nextSibling ) { + if ( n.nodeType === 1 && n !== elem ) { + r.push( n ); + } + } + + return r; + } +}); + +jQuery.fn.extend({ + has: function( target ) { + var i, + targets = jQuery( target, this ), + len = targets.length; + + return this.filter(function() { + for ( i = 0; i < len; i++ ) { + if ( jQuery.contains( this, targets[i] ) ) { + return true; + } + } + }); + }, + + closest: function( selectors, context ) { + var cur, + i = 0, + l = this.length, + matched = [], + pos = rneedsContext.test( selectors ) || typeof selectors !== "string" ? + jQuery( selectors, context || this.context ) : + 0; + + for ( ; i < l; i++ ) { + for ( cur = this[i]; cur && cur !== context; cur = cur.parentNode ) { + // Always skip document fragments + if ( cur.nodeType < 11 && (pos ? + pos.index(cur) > -1 : + + // Don't pass non-elements to Sizzle + cur.nodeType === 1 && + jQuery.find.matchesSelector(cur, selectors)) ) { + + matched.push( cur ); + break; + } + } + } + + return this.pushStack( matched.length > 1 ? jQuery.unique( matched ) : matched ); + }, + + // Determine the position of an element within + // the matched set of elements + index: function( elem ) { + + // No argument, return index in parent + if ( !elem ) { + return ( this[0] && this[0].parentNode ) ? this.first().prevAll().length : -1; + } + + // index in selector + if ( typeof elem === "string" ) { + return jQuery.inArray( this[0], jQuery( elem ) ); + } + + // Locate the position of the desired element + return jQuery.inArray( + // If it receives a jQuery object, the first element is used + elem.jquery ? elem[0] : elem, this ); + }, + + add: function( selector, context ) { + return this.pushStack( + jQuery.unique( + jQuery.merge( this.get(), jQuery( selector, context ) ) + ) + ); + }, + + addBack: function( selector ) { + return this.add( selector == null ? + this.prevObject : this.prevObject.filter(selector) + ); + } +}); + +function sibling( cur, dir ) { + do { + cur = cur[ dir ]; + } while ( cur && cur.nodeType !== 1 ); + + return cur; +} + +jQuery.each({ + parent: function( elem ) { + var parent = elem.parentNode; + return parent && parent.nodeType !== 11 ? 
parent : null; + }, + parents: function( elem ) { + return jQuery.dir( elem, "parentNode" ); + }, + parentsUntil: function( elem, i, until ) { + return jQuery.dir( elem, "parentNode", until ); + }, + next: function( elem ) { + return sibling( elem, "nextSibling" ); + }, + prev: function( elem ) { + return sibling( elem, "previousSibling" ); + }, + nextAll: function( elem ) { + return jQuery.dir( elem, "nextSibling" ); + }, + prevAll: function( elem ) { + return jQuery.dir( elem, "previousSibling" ); + }, + nextUntil: function( elem, i, until ) { + return jQuery.dir( elem, "nextSibling", until ); + }, + prevUntil: function( elem, i, until ) { + return jQuery.dir( elem, "previousSibling", until ); + }, + siblings: function( elem ) { + return jQuery.sibling( ( elem.parentNode || {} ).firstChild, elem ); + }, + children: function( elem ) { + return jQuery.sibling( elem.firstChild ); + }, + contents: function( elem ) { + return jQuery.nodeName( elem, "iframe" ) ? + elem.contentDocument || elem.contentWindow.document : + jQuery.merge( [], elem.childNodes ); + } +}, function( name, fn ) { + jQuery.fn[ name ] = function( until, selector ) { + var ret = jQuery.map( this, fn, until ); + + if ( name.slice( -5 ) !== "Until" ) { + selector = until; + } + + if ( selector && typeof selector === "string" ) { + ret = jQuery.filter( selector, ret ); + } + + if ( this.length > 1 ) { + // Remove duplicates + if ( !guaranteedUnique[ name ] ) { + ret = jQuery.unique( ret ); + } + + // Reverse order for parents* and prev-derivatives + if ( rparentsprev.test( name ) ) { + ret = ret.reverse(); + } + } + + return this.pushStack( ret ); + }; +}); +var rnotwhite = (/\S+/g); + + + +// String to Object options format cache +var optionsCache = {}; + +// Convert String-formatted options into Object-formatted ones and store in cache +function createOptions( options ) { + var object = optionsCache[ options ] = {}; + jQuery.each( options.match( rnotwhite ) || [], function( _, flag ) { + object[ flag ] = true; + }); + return object; +} + +/* + * Create a callback list using the following parameters: + * + * options: an optional list of space-separated options that will change how + * the callback list behaves or a more traditional option object + * + * By default a callback list will act like an event callback list and can be + * "fired" multiple times. + * + * Possible options: + * + * once: will ensure the callback list can only be fired once (like a Deferred) + * + * memory: will keep track of previous values and will call any callback added + * after the list has been fired right away with the latest "memorized" + * values (like a Deferred) + * + * unique: will ensure a callback can only be added once (no duplicate in the list) + * + * stopOnFalse: interrupt callings when a callback returns false + * + */ +jQuery.Callbacks = function( options ) { + + // Convert options from String-formatted to Object-formatted if needed + // (we check in cache first) + options = typeof options === "string" ? 
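+ // Usage sketch (added, not upstream code):
+ //   var list = jQuery.Callbacks( "once memory" );
+ //   list.add( fnA );
+ //   list.fire( "go" );    // fnA( "go" )
+ //   list.add( fnB );      // "memory": fnB runs immediately with "go"
+ //   list.fire( "again" ); // "once": ignored, the list has already fired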
+ ( optionsCache[ options ] || createOptions( options ) ) : + jQuery.extend( {}, options ); + + var // Flag to know if list is currently firing + firing, + // Last fire value (for non-forgettable lists) + memory, + // Flag to know if list was already fired + fired, + // End of the loop when firing + firingLength, + // Index of currently firing callback (modified by remove if needed) + firingIndex, + // First callback to fire (used internally by add and fireWith) + firingStart, + // Actual callback list + list = [], + // Stack of fire calls for repeatable lists + stack = !options.once && [], + // Fire callbacks + fire = function( data ) { + memory = options.memory && data; + fired = true; + firingIndex = firingStart || 0; + firingStart = 0; + firingLength = list.length; + firing = true; + for ( ; list && firingIndex < firingLength; firingIndex++ ) { + if ( list[ firingIndex ].apply( data[ 0 ], data[ 1 ] ) === false && options.stopOnFalse ) { + memory = false; // To prevent further calls using add + break; + } + } + firing = false; + if ( list ) { + if ( stack ) { + if ( stack.length ) { + fire( stack.shift() ); + } + } else if ( memory ) { + list = []; + } else { + self.disable(); + } + } + }, + // Actual Callbacks object + self = { + // Add a callback or a collection of callbacks to the list + add: function() { + if ( list ) { + // First, we save the current length + var start = list.length; + (function add( args ) { + jQuery.each( args, function( _, arg ) { + var type = jQuery.type( arg ); + if ( type === "function" ) { + if ( !options.unique || !self.has( arg ) ) { + list.push( arg ); + } + } else if ( arg && arg.length && type !== "string" ) { + // Inspect recursively + add( arg ); + } + }); + })( arguments ); + // Do we need to add the callbacks to the + // current firing batch? + if ( firing ) { + firingLength = list.length; + // With memory, if we're not firing then + // we should call right away + } else if ( memory ) { + firingStart = start; + fire( memory ); + } + } + return this; + }, + // Remove a callback from the list + remove: function() { + if ( list ) { + jQuery.each( arguments, function( _, arg ) { + var index; + while ( ( index = jQuery.inArray( arg, list, index ) ) > -1 ) { + list.splice( index, 1 ); + // Handle firing indexes + if ( firing ) { + if ( index <= firingLength ) { + firingLength--; + } + if ( index <= firingIndex ) { + firingIndex--; + } + } + } + }); + } + return this; + }, + // Check if a given callback is in the list. + // If no argument is given, return whether or not list has callbacks attached. + has: function( fn ) { + return fn ? jQuery.inArray( fn, list ) > -1 : !!( list && list.length ); + }, + // Remove all callbacks from the list + empty: function() { + list = []; + firingLength = 0; + return this; + }, + // Have the list do nothing anymore + disable: function() { + list = stack = memory = undefined; + return this; + }, + // Is it disabled? + disabled: function() { + return !list; + }, + // Lock the list in its current state + lock: function() { + stack = undefined; + if ( !memory ) { + self.disable(); + } + return this; + }, + // Is it locked? + locked: function() { + return !stack; + }, + // Call all callbacks with the given context and arguments + fireWith: function( context, args ) { + if ( list && ( !fired || stack ) ) { + args = args || []; + args = [ context, args.slice ? 
args.slice() : args ]; + if ( firing ) { + stack.push( args ); + } else { + fire( args ); + } + } + return this; + }, + // Call all the callbacks with the given arguments + fire: function() { + self.fireWith( this, arguments ); + return this; + }, + // To know if the callbacks have already been called at least once + fired: function() { + return !!fired; + } + }; + + return self; +}; + + +jQuery.extend({ + + Deferred: function( func ) { + var tuples = [ + // action, add listener, listener list, final state + [ "resolve", "done", jQuery.Callbacks("once memory"), "resolved" ], + [ "reject", "fail", jQuery.Callbacks("once memory"), "rejected" ], + [ "notify", "progress", jQuery.Callbacks("memory") ] + ], + state = "pending", + promise = { + state: function() { + return state; + }, + always: function() { + deferred.done( arguments ).fail( arguments ); + return this; + }, + then: function( /* fnDone, fnFail, fnProgress */ ) { + var fns = arguments; + return jQuery.Deferred(function( newDefer ) { + jQuery.each( tuples, function( i, tuple ) { + var fn = jQuery.isFunction( fns[ i ] ) && fns[ i ]; + // deferred[ done | fail | progress ] for forwarding actions to newDefer + deferred[ tuple[1] ](function() { + var returned = fn && fn.apply( this, arguments ); + if ( returned && jQuery.isFunction( returned.promise ) ) { + returned.promise() + .done( newDefer.resolve ) + .fail( newDefer.reject ) + .progress( newDefer.notify ); + } else { + newDefer[ tuple[ 0 ] + "With" ]( this === promise ? newDefer.promise() : this, fn ? [ returned ] : arguments ); + } + }); + }); + fns = null; + }).promise(); + }, + // Get a promise for this deferred + // If obj is provided, the promise aspect is added to the object + promise: function( obj ) { + return obj != null ? jQuery.extend( obj, promise ) : promise; + } + }, + deferred = {}; + + // Keep pipe for back-compat + promise.pipe = promise.then; + + // Add list-specific methods + jQuery.each( tuples, function( i, tuple ) { + var list = tuple[ 2 ], + stateString = tuple[ 3 ]; + + // promise[ done | fail | progress ] = list.add + promise[ tuple[1] ] = list.add; + + // Handle state + if ( stateString ) { + list.add(function() { + // state = [ resolved | rejected ] + state = stateString; + + // [ reject_list | resolve_list ].disable; progress_list.lock + }, tuples[ i ^ 1 ][ 2 ].disable, tuples[ 2 ][ 2 ].lock ); + } + + // deferred[ resolve | reject | notify ] + deferred[ tuple[0] ] = function() { + deferred[ tuple[0] + "With" ]( this === deferred ? promise : this, arguments ); + return this; + }; + deferred[ tuple[0] + "With" ] = list.fireWith; + }); + + // Make the deferred a promise + promise.promise( deferred ); + + // Call given func if any + if ( func ) { + func.call( deferred, deferred ); + } + + // All done! + return deferred; + }, + + // Deferred helper + when: function( subordinate /* , ..., subordinateN */ ) { + var i = 0, + resolveValues = slice.call( arguments ), + length = resolveValues.length, + + // the count of uncompleted subordinates + remaining = length !== 1 || ( subordinate && jQuery.isFunction( subordinate.promise ) ) ? length : 0, + + // the master Deferred. If resolveValues consist of only a single Deferred, just use that. + deferred = remaining === 1 ? subordinate : jQuery.Deferred(), + + // Update function for both resolve and progress values + updateFunc = function( i, contexts, values ) { + return function( value ) { + contexts[ i ] = this; + values[ i ] = arguments.length > 1 ? 
slice.call( arguments ) : value; + if ( values === progressValues ) { + deferred.notifyWith( contexts, values ); + + } else if ( !(--remaining) ) { + deferred.resolveWith( contexts, values ); + } + }; + }, + + progressValues, progressContexts, resolveContexts; + + // add listeners to Deferred subordinates; treat others as resolved + if ( length > 1 ) { + progressValues = new Array( length ); + progressContexts = new Array( length ); + resolveContexts = new Array( length ); + for ( ; i < length; i++ ) { + if ( resolveValues[ i ] && jQuery.isFunction( resolveValues[ i ].promise ) ) { + resolveValues[ i ].promise() + .done( updateFunc( i, resolveContexts, resolveValues ) ) + .fail( deferred.reject ) + .progress( updateFunc( i, progressContexts, progressValues ) ); + } else { + --remaining; + } + } + } + + // if we're not waiting on anything, resolve the master + if ( !remaining ) { + deferred.resolveWith( resolveContexts, resolveValues ); + } + + return deferred.promise(); + } +}); + + +// The deferred used on DOM ready +var readyList; + +jQuery.fn.ready = function( fn ) { + // Add the callback + jQuery.ready.promise().done( fn ); + + return this; +}; + +jQuery.extend({ + // Is the DOM ready to be used? Set to true once it occurs. + isReady: false, + + // A counter to track how many items to wait for before + // the ready event fires. See #6781 + readyWait: 1, + + // Hold (or release) the ready event + holdReady: function( hold ) { + if ( hold ) { + jQuery.readyWait++; + } else { + jQuery.ready( true ); + } + }, + + // Handle when the DOM is ready + ready: function( wait ) { + + // Abort if there are pending holds or we're already ready + if ( wait === true ? --jQuery.readyWait : jQuery.isReady ) { + return; + } + + // Make sure body exists, at least, in case IE gets a little overzealous (ticket #5443). + if ( !document.body ) { + return setTimeout( jQuery.ready ); + } + + // Remember that the DOM is ready + jQuery.isReady = true; + + // If a normal DOM Ready event fired, decrement, and wait if need be + if ( wait !== true && --jQuery.readyWait > 0 ) { + return; + } + + // If there are functions bound, to execute + readyList.resolveWith( document, [ jQuery ] ); + + // Trigger any bound ready events + if ( jQuery.fn.triggerHandler ) { + jQuery( document ).triggerHandler( "ready" ); + jQuery( document ).off( "ready" ); + } + } +}); + +/** + * Clean-up method for dom ready events + */ +function detach() { + if ( document.addEventListener ) { + document.removeEventListener( "DOMContentLoaded", completed, false ); + window.removeEventListener( "load", completed, false ); + + } else { + document.detachEvent( "onreadystatechange", completed ); + window.detachEvent( "onload", completed ); + } +} + +/** + * The ready event handler and self cleanup method + */ +function completed() { + // readyState === "complete" is good enough for us to call the dom ready in oldIE + if ( document.addEventListener || event.type === "load" || document.readyState === "complete" ) { + detach(); + jQuery.ready(); + } +} + +jQuery.ready.promise = function( obj ) { + if ( !readyList ) { + + readyList = jQuery.Deferred(); + + // Catch cases where $(document).ready() is called after the browser event has already occurred. 
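+ // (E.g. a script injected on demand may call jQuery(fn) long after
+ // DOMContentLoaded has fired; the readyState check below still resolves.)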
+ // we once tried to use readyState "interactive" here, but it caused issues like the one + // discovered by ChrisS here: http://bugs.jquery.com/ticket/12282#comment:15 + if ( document.readyState === "complete" ) { + // Handle it asynchronously to allow scripts the opportunity to delay ready + setTimeout( jQuery.ready ); + + // Standards-based browsers support DOMContentLoaded + } else if ( document.addEventListener ) { + // Use the handy event callback + document.addEventListener( "DOMContentLoaded", completed, false ); + + // A fallback to window.onload, that will always work + window.addEventListener( "load", completed, false ); + + // If IE event model is used + } else { + // Ensure firing before onload, maybe late but safe also for iframes + document.attachEvent( "onreadystatechange", completed ); + + // A fallback to window.onload, that will always work + window.attachEvent( "onload", completed ); + + // If IE and not a frame + // continually check to see if the document is ready + var top = false; + + try { + top = window.frameElement == null && document.documentElement; + } catch(e) {} + + if ( top && top.doScroll ) { + (function doScrollCheck() { + if ( !jQuery.isReady ) { + + try { + // Use the trick by Diego Perini + // http://javascript.nwbox.com/IEContentLoaded/ + top.doScroll("left"); + } catch(e) { + return setTimeout( doScrollCheck, 50 ); + } + + // detach all dom ready events + detach(); + + // and execute any waiting functions + jQuery.ready(); + } + })(); + } + } + } + return readyList.promise( obj ); +}; + + +var strundefined = typeof undefined; + + + +// Support: IE<9 +// Iteration over object's inherited properties before its own +var i; +for ( i in jQuery( support ) ) { + break; +} +support.ownLast = i !== "0"; + +// Note: most support tests are defined in their respective modules. +// false until the test is run +support.inlineBlockNeedsLayout = false; + +// Execute ASAP in case we need to set body.style.zoom +jQuery(function() { + // Minified: var a,b,c,d + var val, div, body, container; + + body = document.getElementsByTagName( "body" )[ 0 ]; + if ( !body || !body.style ) { + // Return for frameset docs that don't have a body + return; + } + + // Setup + div = document.createElement( "div" ); + container = document.createElement( "div" ); + container.style.cssText = "position:absolute;border:0;width:0;height:0;top:0;left:-9999px"; + body.appendChild( container ).appendChild( div ); + + if ( typeof div.style.zoom !== strundefined ) { + // Support: IE<8 + // Check if natively block-level elements act like inline-block + // elements when setting their display to 'inline' and giving + // them layout + div.style.cssText = "display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1"; + + support.inlineBlockNeedsLayout = val = div.offsetWidth === 3; + if ( val ) { + // Prevent IE 6 from affecting layout for positioned elements #11048 + // Prevent IE from shrinking the body in IE 7 mode #12869 + // Support: IE<8 + body.style.zoom = 1; + } + } + + body.removeChild( container ); +}); + + + + +(function() { + var div = document.createElement( "div" ); + + // Execute the test only if not already executed in another module. + if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } + + // Null elements to avoid leaks in IE. 
+ div = null; +})(); + + +/** + * Determines whether an object can have data + */ +jQuery.acceptData = function( elem ) { + var noData = jQuery.noData[ (elem.nodeName + " ").toLowerCase() ], + nodeType = +elem.nodeType || 1; + + // Do not set data on non-element DOM nodes because it will not be cleared (#8335). + return nodeType !== 1 && nodeType !== 9 ? + false : + + // Nodes accept data unless otherwise specified; rejection can be conditional + !noData || noData !== true && elem.getAttribute("classid") === noData; +}; + + +var rbrace = /^(?:\{[\w\W]*\}|\[[\w\W]*\])$/, + rmultiDash = /([A-Z])/g; + +function dataAttr( elem, key, data ) { + // If nothing was found internally, try to fetch any + // data from the HTML5 data-* attribute + if ( data === undefined && elem.nodeType === 1 ) { + + var name = "data-" + key.replace( rmultiDash, "-$1" ).toLowerCase(); + + data = elem.getAttribute( name ); + + if ( typeof data === "string" ) { + try { + data = data === "true" ? true : + data === "false" ? false : + data === "null" ? null : + // Only convert to a number if it doesn't change the string + +data + "" === data ? +data : + rbrace.test( data ) ? jQuery.parseJSON( data ) : + data; + } catch( e ) {} + + // Make sure we set the data so it isn't changed later + jQuery.data( elem, key, data ); + + } else { + data = undefined; + } + } + + return data; +} + +// checks a cache object for emptiness +function isEmptyDataObject( obj ) { + var name; + for ( name in obj ) { + + // if the public data object is empty, the private is still empty + if ( name === "data" && jQuery.isEmptyObject( obj[name] ) ) { + continue; + } + if ( name !== "toJSON" ) { + return false; + } + } + + return true; +} + +function internalData( elem, name, data, pvt /* Internal Use Only */ ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var ret, thisCache, + internalKey = jQuery.expando, + + // We have to handle DOM nodes and JS objects differently because IE6-7 + // can't GC object references properly across the DOM-JS boundary + isNode = elem.nodeType, + + // Only DOM nodes need the global jQuery cache; JS object data is + // attached directly to the object so GC can occur automatically + cache = isNode ? jQuery.cache : elem, + + // Only defining an ID for JS objects if its cache already exists allows + // the code to shortcut on the same path as a DOM node with no cache + id = isNode ? elem[ internalKey ] : elem[ internalKey ] && internalKey; + + // Avoid doing any more work than we need to when trying to get data on an + // object that has no data at all + if ( (!id || !cache[id] || (!pvt && !cache[id].data)) && data === undefined && typeof name === "string" ) { + return; + } + + if ( !id ) { + // Only DOM nodes need a new unique ID for each element since their data + // ends up in the global cache + if ( isNode ) { + id = elem[ internalKey ] = deletedIds.pop() || jQuery.guid++; + } else { + id = internalKey; + } + } + + if ( !cache[ id ] ) { + // Avoid exposing jQuery metadata on plain JS objects when the object + // is serialized using JSON.stringify + cache[ id ] = isNode ? 
{} : { toJSON: jQuery.noop }; + } + + // An object can be passed to jQuery.data instead of a key/value pair; this gets + // shallow copied over onto the existing cache + if ( typeof name === "object" || typeof name === "function" ) { + if ( pvt ) { + cache[ id ] = jQuery.extend( cache[ id ], name ); + } else { + cache[ id ].data = jQuery.extend( cache[ id ].data, name ); + } + } + + thisCache = cache[ id ]; + + // jQuery data() is stored in a separate object inside the object's internal data + // cache in order to avoid key collisions between internal data and user-defined + // data. + if ( !pvt ) { + if ( !thisCache.data ) { + thisCache.data = {}; + } + + thisCache = thisCache.data; + } + + if ( data !== undefined ) { + thisCache[ jQuery.camelCase( name ) ] = data; + } + + // Check for both converted-to-camel and non-converted data property names + // If a data property was specified + if ( typeof name === "string" ) { + + // First Try to find as-is property data + ret = thisCache[ name ]; + + // Test for null|undefined property data + if ( ret == null ) { + + // Try to find the camelCased property + ret = thisCache[ jQuery.camelCase( name ) ]; + } + } else { + ret = thisCache; + } + + return ret; +} + +function internalRemoveData( elem, name, pvt ) { + if ( !jQuery.acceptData( elem ) ) { + return; + } + + var thisCache, i, + isNode = elem.nodeType, + + // See jQuery.data for more information + cache = isNode ? jQuery.cache : elem, + id = isNode ? elem[ jQuery.expando ] : jQuery.expando; + + // If there is already no cache entry for this object, there is no + // purpose in continuing + if ( !cache[ id ] ) { + return; + } + + if ( name ) { + + thisCache = pvt ? cache[ id ] : cache[ id ].data; + + if ( thisCache ) { + + // Support array or space separated string names for data keys + if ( !jQuery.isArray( name ) ) { + + // try the string as a key before any manipulation + if ( name in thisCache ) { + name = [ name ]; + } else { + + // split the camel cased version by spaces unless a key with the spaces exists + name = jQuery.camelCase( name ); + if ( name in thisCache ) { + name = [ name ]; + } else { + name = name.split(" "); + } + } + } else { + // If "name" is an array of keys... + // When data is initially created, via ("key", "val") signature, + // keys will be converted to camelCase. + // Since there is no way to tell _how_ a key was added, remove + // both plain key and camelCase key. #12786 + // This will only penalize the array argument path. + name = name.concat( jQuery.map( name, jQuery.camelCase ) ); + } + + i = name.length; + while ( i-- ) { + delete thisCache[ name[i] ]; + } + + // If there is no data left in the cache, we want to continue + // and let the cache object itself get destroyed + if ( pvt ? 
!isEmptyDataObject(thisCache) : !jQuery.isEmptyObject(thisCache) ) { + return; + } + } + } + + // See jQuery.data for more information + if ( !pvt ) { + delete cache[ id ].data; + + // Don't destroy the parent cache unless the internal data object + // had been the only thing left in it + if ( !isEmptyDataObject( cache[ id ] ) ) { + return; + } + } + + // Destroy the cache + if ( isNode ) { + jQuery.cleanData( [ elem ], true ); + + // Use delete when supported for expandos or `cache` is not a window per isWindow (#10080) + /* jshint eqeqeq: false */ + } else if ( support.deleteExpando || cache != cache.window ) { + /* jshint eqeqeq: true */ + delete cache[ id ]; + + // When all else fails, null + } else { + cache[ id ] = null; + } +} + +jQuery.extend({ + cache: {}, + + // The following elements (space-suffixed to avoid Object.prototype collisions) + // throw uncatchable exceptions if you attempt to set expando properties + noData: { + "applet ": true, + "embed ": true, + // ...but Flash objects (which have this classid) *can* handle expandos + "object ": "clsid:D27CDB6E-AE6D-11cf-96B8-444553540000" + }, + + hasData: function( elem ) { + elem = elem.nodeType ? jQuery.cache[ elem[jQuery.expando] ] : elem[ jQuery.expando ]; + return !!elem && !isEmptyDataObject( elem ); + }, + + data: function( elem, name, data ) { + return internalData( elem, name, data ); + }, + + removeData: function( elem, name ) { + return internalRemoveData( elem, name ); + }, + + // For internal use only. + _data: function( elem, name, data ) { + return internalData( elem, name, data, true ); + }, + + _removeData: function( elem, name ) { + return internalRemoveData( elem, name, true ); + } +}); + +jQuery.fn.extend({ + data: function( key, value ) { + var i, name, data, + elem = this[0], + attrs = elem && elem.attributes; + + // Special expections of .data basically thwart jQuery.access, + // so implement the relevant behavior ourselves + + // Gets all values + if ( key === undefined ) { + if ( this.length ) { + data = jQuery.data( elem ); + + if ( elem.nodeType === 1 && !jQuery._data( elem, "parsedAttrs" ) ) { + i = attrs.length; + while ( i-- ) { + + // Support: IE11+ + // The attrs elements can be null (#14894) + if ( attrs[ i ] ) { + name = attrs[ i ].name; + if ( name.indexOf( "data-" ) === 0 ) { + name = jQuery.camelCase( name.slice(5) ); + dataAttr( elem, name, data[ name ] ); + } + } + } + jQuery._data( elem, "parsedAttrs", true ); + } + } + + return data; + } + + // Sets multiple values + if ( typeof key === "object" ) { + return this.each(function() { + jQuery.data( this, key ); + }); + } + + return arguments.length > 1 ? + + // Sets one value + this.each(function() { + jQuery.data( this, key, value ); + }) : + + // Gets one value + // Try to fetch any internally stored data first + elem ? 
dataAttr( elem, key, jQuery.data( elem, key ) ) : undefined; + }, + + removeData: function( key ) { + return this.each(function() { + jQuery.removeData( this, key ); + }); + } +}); + + +jQuery.extend({ + queue: function( elem, type, data ) { + var queue; + + if ( elem ) { + type = ( type || "fx" ) + "queue"; + queue = jQuery._data( elem, type ); + + // Speed up dequeue by getting out quickly if this is just a lookup + if ( data ) { + if ( !queue || jQuery.isArray(data) ) { + queue = jQuery._data( elem, type, jQuery.makeArray(data) ); + } else { + queue.push( data ); + } + } + return queue || []; + } + }, + + dequeue: function( elem, type ) { + type = type || "fx"; + + var queue = jQuery.queue( elem, type ), + startLength = queue.length, + fn = queue.shift(), + hooks = jQuery._queueHooks( elem, type ), + next = function() { + jQuery.dequeue( elem, type ); + }; + + // If the fx queue is dequeued, always remove the progress sentinel + if ( fn === "inprogress" ) { + fn = queue.shift(); + startLength--; + } + + if ( fn ) { + + // Add a progress sentinel to prevent the fx queue from being + // automatically dequeued + if ( type === "fx" ) { + queue.unshift( "inprogress" ); + } + + // clear up the last queue stop function + delete hooks.stop; + fn.call( elem, next, hooks ); + } + + if ( !startLength && hooks ) { + hooks.empty.fire(); + } + }, + + // not intended for public consumption - generates a queueHooks object, or returns the current one + _queueHooks: function( elem, type ) { + var key = type + "queueHooks"; + return jQuery._data( elem, key ) || jQuery._data( elem, key, { + empty: jQuery.Callbacks("once memory").add(function() { + jQuery._removeData( elem, type + "queue" ); + jQuery._removeData( elem, key ); + }) + }); + } +}); + +jQuery.fn.extend({ + queue: function( type, data ) { + var setter = 2; + + if ( typeof type !== "string" ) { + data = type; + type = "fx"; + setter--; + } + + if ( arguments.length < setter ) { + return jQuery.queue( this[0], type ); + } + + return data === undefined ? 
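+ // Note (added usage sketch, not upstream): on the default "fx" queue,
+ // dequeue() keeps an "inprogress" sentinel at the head while a step runs:
+ //   jQuery( el ).queue(function( next ) { /* step */ next(); });
+ // starts immediately only if no other fx step is currently in flight.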
+ this : + this.each(function() { + var queue = jQuery.queue( this, type, data ); + + // ensure a hooks for this queue + jQuery._queueHooks( this, type ); + + if ( type === "fx" && queue[0] !== "inprogress" ) { + jQuery.dequeue( this, type ); + } + }); + }, + dequeue: function( type ) { + return this.each(function() { + jQuery.dequeue( this, type ); + }); + }, + clearQueue: function( type ) { + return this.queue( type || "fx", [] ); + }, + // Get a promise resolved when queues of a certain type + // are emptied (fx is the type by default) + promise: function( type, obj ) { + var tmp, + count = 1, + defer = jQuery.Deferred(), + elements = this, + i = this.length, + resolve = function() { + if ( !( --count ) ) { + defer.resolveWith( elements, [ elements ] ); + } + }; + + if ( typeof type !== "string" ) { + obj = type; + type = undefined; + } + type = type || "fx"; + + while ( i-- ) { + tmp = jQuery._data( elements[ i ], type + "queueHooks" ); + if ( tmp && tmp.empty ) { + count++; + tmp.empty.add( resolve ); + } + } + resolve(); + return defer.promise( obj ); + } +}); +var pnum = (/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/).source; + +var cssExpand = [ "Top", "Right", "Bottom", "Left" ]; + +var isHidden = function( elem, el ) { + // isHidden might be called from jQuery#filter function; + // in that case, element will be second argument + elem = el || elem; + return jQuery.css( elem, "display" ) === "none" || !jQuery.contains( elem.ownerDocument, elem ); + }; + + + +// Multifunctional method to get and set values of a collection +// The value/s can optionally be executed if it's a function +var access = jQuery.access = function( elems, fn, key, value, chainable, emptyGet, raw ) { + var i = 0, + length = elems.length, + bulk = key == null; + + // Sets many values + if ( jQuery.type( key ) === "object" ) { + chainable = true; + for ( i in key ) { + jQuery.access( elems, fn, i, key[i], true, emptyGet, raw ); + } + + // Sets one value + } else if ( value !== undefined ) { + chainable = true; + + if ( !jQuery.isFunction( value ) ) { + raw = true; + } + + if ( bulk ) { + // Bulk operations run against the entire set + if ( raw ) { + fn.call( elems, value ); + fn = null; + + // ...except when executing function values + } else { + bulk = fn; + fn = function( elem, key, value ) { + return bulk.call( jQuery( elem ), value ); + }; + } + } + + if ( fn ) { + for ( ; i < length; i++ ) { + fn( elems[i], key, raw ? value : value.call( elems[i], i, fn( elems[i], key ) ) ); + } + } + } + + return chainable ? + elems : + + // Gets + bulk ? + fn.call( elems ) : + length ? fn( elems[0], key ) : emptyGet; +}; +var rcheckableType = (/^(?:checkbox|radio)$/i); + + + +(function() { + // Minified: var a,b,c + var input = document.createElement( "input" ), + div = document.createElement( "div" ), + fragment = document.createDocumentFragment(); + + // Setup + div.innerHTML = "
a"; + + // IE strips leading whitespace when .innerHTML is used + support.leadingWhitespace = div.firstChild.nodeType === 3; + + // Make sure that tbody elements aren't automatically inserted + // IE will insert them into empty tables + support.tbody = !div.getElementsByTagName( "tbody" ).length; + + // Make sure that link elements get serialized correctly by innerHTML + // This requires a wrapper element in IE + support.htmlSerialize = !!div.getElementsByTagName( "link" ).length; + + // Makes sure cloning an html5 element does not cause problems + // Where outerHTML is undefined, this still works + support.html5Clone = + document.createElement( "nav" ).cloneNode( true ).outerHTML !== "<:nav>"; + + // Check if a disconnected checkbox will retain its checked + // value of true after appended to the DOM (IE6/7) + input.type = "checkbox"; + input.checked = true; + fragment.appendChild( input ); + support.appendChecked = input.checked; + + // Make sure textarea (and checkbox) defaultValue is properly cloned + // Support: IE6-IE11+ + div.innerHTML = ""; + support.noCloneChecked = !!div.cloneNode( true ).lastChild.defaultValue; + + // #11217 - WebKit loses check when the name is after the checked attribute + fragment.appendChild( div ); + div.innerHTML = ""; + + // Support: Safari 5.1, iOS 5.1, Android 4.x, Android 2.3 + // old WebKit doesn't clone checked state correctly in fragments + support.checkClone = div.cloneNode( true ).cloneNode( true ).lastChild.checked; + + // Support: IE<9 + // Opera does not clone events (and typeof div.attachEvent === undefined). + // IE9-10 clones events bound via attachEvent, but they don't trigger with .click() + support.noCloneEvent = true; + if ( div.attachEvent ) { + div.attachEvent( "onclick", function() { + support.noCloneEvent = false; + }); + + div.cloneNode( true ).click(); + } + + // Execute the test only if not already executed in another module. + if (support.deleteExpando == null) { + // Support: IE<9 + support.deleteExpando = true; + try { + delete div.test; + } catch( e ) { + support.deleteExpando = false; + } + } +})(); + + +(function() { + var i, eventName, + div = document.createElement( "div" ); + + // Support: IE<9 (lack submit/change bubble), Firefox 23+ (lack focusin event) + for ( i in { submit: true, change: true, focusin: true }) { + eventName = "on" + i; + + if ( !(support[ i + "Bubbles" ] = eventName in window) ) { + // Beware of CSP restrictions (https://developer.mozilla.org/en/Security/CSP) + div.setAttribute( eventName, "t" ); + support[ i + "Bubbles" ] = div.attributes[ eventName ].expando === false; + } + } + + // Null elements to avoid leaks in IE. + div = null; +})(); + + +var rformElems = /^(?:input|select|textarea)$/i, + rkeyEvent = /^key/, + rmouseEvent = /^(?:mouse|pointer|contextmenu)|click/, + rfocusMorph = /^(?:focusinfocus|focusoutblur)$/, + rtypenamespace = /^([^.]*)(?:\.(.+)|)$/; + +function returnTrue() { + return true; +} + +function returnFalse() { + return false; +} + +function safeActiveElement() { + try { + return document.activeElement; + } catch ( err ) { } +} + +/* + * Helper functions for managing events -- not part of the public interface. + * Props to Dean Edwards' addEvent library for many of the ideas. 
+ */ +jQuery.event = { + + global: {}, + + add: function( elem, types, handler, data, selector ) { + var tmp, events, t, handleObjIn, + special, eventHandle, handleObj, + handlers, type, namespaces, origType, + elemData = jQuery._data( elem ); + + // Don't attach events to noData or text/comment nodes (but allow plain objects) + if ( !elemData ) { + return; + } + + // Caller can pass in an object of custom data in lieu of the handler + if ( handler.handler ) { + handleObjIn = handler; + handler = handleObjIn.handler; + selector = handleObjIn.selector; + } + + // Make sure that the handler has a unique ID, used to find/remove it later + if ( !handler.guid ) { + handler.guid = jQuery.guid++; + } + + // Init the element's event structure and main handler, if this is the first + if ( !(events = elemData.events) ) { + events = elemData.events = {}; + } + if ( !(eventHandle = elemData.handle) ) { + eventHandle = elemData.handle = function( e ) { + // Discard the second event of a jQuery.event.trigger() and + // when an event is called after a page has unloaded + return typeof jQuery !== strundefined && (!e || jQuery.event.triggered !== e.type) ? + jQuery.event.dispatch.apply( eventHandle.elem, arguments ) : + undefined; + }; + // Add elem as a property of the handle fn to prevent a memory leak with IE non-native events + eventHandle.elem = elem; + } + + // Handle multiple events separated by a space + types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // There *must* be a type, no attaching namespace-only handlers + if ( !type ) { + continue; + } + + // If event changes its type, use the special event handlers for the changed type + special = jQuery.event.special[ type ] || {}; + + // If selector defined, determine special event api type, otherwise given type + type = ( selector ? 
special.delegateType : special.bindType ) || type; + + // Update special based on newly reset type + special = jQuery.event.special[ type ] || {}; + + // handleObj is passed to all event handlers + handleObj = jQuery.extend({ + type: type, + origType: origType, + data: data, + handler: handler, + guid: handler.guid, + selector: selector, + needsContext: selector && jQuery.expr.match.needsContext.test( selector ), + namespace: namespaces.join(".") + }, handleObjIn ); + + // Init the event handler queue if we're the first + if ( !(handlers = events[ type ]) ) { + handlers = events[ type ] = []; + handlers.delegateCount = 0; + + // Only use addEventListener/attachEvent if the special events handler returns false + if ( !special.setup || special.setup.call( elem, data, namespaces, eventHandle ) === false ) { + // Bind the global event handler to the element + if ( elem.addEventListener ) { + elem.addEventListener( type, eventHandle, false ); + + } else if ( elem.attachEvent ) { + elem.attachEvent( "on" + type, eventHandle ); + } + } + } + + if ( special.add ) { + special.add.call( elem, handleObj ); + + if ( !handleObj.handler.guid ) { + handleObj.handler.guid = handler.guid; + } + } + + // Add to the element's handler list, delegates in front + if ( selector ) { + handlers.splice( handlers.delegateCount++, 0, handleObj ); + } else { + handlers.push( handleObj ); + } + + // Keep track of which events have ever been used, for event optimization + jQuery.event.global[ type ] = true; + } + + // Nullify elem to prevent memory leaks in IE + elem = null; + }, + + // Detach an event or set of events from an element + remove: function( elem, types, handler, selector, mappedTypes ) { + var j, handleObj, tmp, + origCount, t, events, + special, handlers, type, + namespaces, origType, + elemData = jQuery.hasData( elem ) && jQuery._data( elem ); + + if ( !elemData || !(events = elemData.events) ) { + return; + } + + // Once for each type.namespace in types; type may be omitted + types = ( types || "" ).match( rnotwhite ) || [ "" ]; + t = types.length; + while ( t-- ) { + tmp = rtypenamespace.exec( types[t] ) || []; + type = origType = tmp[1]; + namespaces = ( tmp[2] || "" ).split( "." ).sort(); + + // Unbind all events (on this namespace, if provided) for the element + if ( !type ) { + for ( type in events ) { + jQuery.event.remove( elem, type + types[ t ], handler, selector, true ); + } + continue; + } + + special = jQuery.event.special[ type ] || {}; + type = ( selector ? 
special.delegateType : special.bindType ) || type; + handlers = events[ type ] || []; + tmp = tmp[2] && new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ); + + // Remove matching events + origCount = j = handlers.length; + while ( j-- ) { + handleObj = handlers[ j ]; + + if ( ( mappedTypes || origType === handleObj.origType ) && + ( !handler || handler.guid === handleObj.guid ) && + ( !tmp || tmp.test( handleObj.namespace ) ) && + ( !selector || selector === handleObj.selector || selector === "**" && handleObj.selector ) ) { + handlers.splice( j, 1 ); + + if ( handleObj.selector ) { + handlers.delegateCount--; + } + if ( special.remove ) { + special.remove.call( elem, handleObj ); + } + } + } + + // Remove generic event handler if we removed something and no more handlers exist + // (avoids potential for endless recursion during removal of special event handlers) + if ( origCount && !handlers.length ) { + if ( !special.teardown || special.teardown.call( elem, namespaces, elemData.handle ) === false ) { + jQuery.removeEvent( elem, type, elemData.handle ); + } + + delete events[ type ]; + } + } + + // Remove the expando if it's no longer used + if ( jQuery.isEmptyObject( events ) ) { + delete elemData.handle; + + // removeData also checks for emptiness and clears the expando if empty + // so use it instead of delete + jQuery._removeData( elem, "events" ); + } + }, + + trigger: function( event, data, elem, onlyHandlers ) { + var handle, ontype, cur, + bubbleType, special, tmp, i, + eventPath = [ elem || document ], + type = hasOwn.call( event, "type" ) ? event.type : event, + namespaces = hasOwn.call( event, "namespace" ) ? event.namespace.split(".") : []; + + cur = tmp = elem = elem || document; + + // Don't do events on text and comment nodes + if ( elem.nodeType === 3 || elem.nodeType === 8 ) { + return; + } + + // focus/blur morphs to focusin/out; ensure we're not firing them right now + if ( rfocusMorph.test( type + jQuery.event.triggered ) ) { + return; + } + + if ( type.indexOf(".") >= 0 ) { + // Namespaced trigger; create a regexp to match event type in handle() + namespaces = type.split("."); + type = namespaces.shift(); + namespaces.sort(); + } + ontype = type.indexOf(":") < 0 && "on" + type; + + // Caller can pass in a jQuery.Event object, Object, or just an event type string + event = event[ jQuery.expando ] ? + event : + new jQuery.Event( type, typeof event === "object" && event ); + + // Trigger bitmask: & 1 for native handlers; & 2 for jQuery (always true) + event.isTrigger = onlyHandlers ? 2 : 3; + event.namespace = namespaces.join("."); + event.namespace_re = event.namespace ? + new RegExp( "(^|\\.)" + namespaces.join("\\.(?:.*\\.|)") + "(\\.|$)" ) : + null; + + // Clean up the event in case it is being reused + event.result = undefined; + if ( !event.target ) { + event.target = elem; + } + + // Clone any incoming data and prepend the event, creating the handler arg list + data = data == null ? 
+ [ event ] : + jQuery.makeArray( data, [ event ] ); + + // Allow special events to draw outside the lines + special = jQuery.event.special[ type ] || {}; + if ( !onlyHandlers && special.trigger && special.trigger.apply( elem, data ) === false ) { + return; + } + + // Determine event propagation path in advance, per W3C events spec (#9951) + // Bubble up to document, then to window; watch for a global ownerDocument var (#9724) + if ( !onlyHandlers && !special.noBubble && !jQuery.isWindow( elem ) ) { + + bubbleType = special.delegateType || type; + if ( !rfocusMorph.test( bubbleType + type ) ) { + cur = cur.parentNode; + } + for ( ; cur; cur = cur.parentNode ) { + eventPath.push( cur ); + tmp = cur; + } + + // Only add window if we got to document (e.g., not plain obj or detached DOM) + if ( tmp === (elem.ownerDocument || document) ) { + eventPath.push( tmp.defaultView || tmp.parentWindow || window ); + } + } + + // Fire handlers on the event path + i = 0; + while ( (cur = eventPath[i++]) && !event.isPropagationStopped() ) { + + event.type = i > 1 ? + bubbleType : + special.bindType || type; + + // jQuery handler + handle = ( jQuery._data( cur, "events" ) || {} )[ event.type ] && jQuery._data( cur, "handle" ); + if ( handle ) { + handle.apply( cur, data ); + } + + // Native handler + handle = ontype && cur[ ontype ]; + if ( handle && handle.apply && jQuery.acceptData( cur ) ) { + event.result = handle.apply( cur, data ); + if ( event.result === false ) { + event.preventDefault(); + } + } + } + event.type = type; + + // If nobody prevented the default action, do it now + if ( !onlyHandlers && !event.isDefaultPrevented() ) { + + if ( (!special._default || special._default.apply( eventPath.pop(), data ) === false) && + jQuery.acceptData( elem ) ) { + + // Call a native DOM method on the target with the same name name as the event. + // Can't use an .isFunction() check here because IE6/7 fails that test. 
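+			// For example, jQuery( form ).trigger( "submit" ) ultimately invokes the
+			// native form.submit() here, with jQuery.event.triggered set below so the
+			// resulting onsubmit does not re-enter this path.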
+ // Don't do default actions on window, that's where global variables be (#6170) + if ( ontype && elem[ type ] && !jQuery.isWindow( elem ) ) { + + // Don't re-trigger an onFOO event when we call its FOO() method + tmp = elem[ ontype ]; + + if ( tmp ) { + elem[ ontype ] = null; + } + + // Prevent re-triggering of the same event, since we already bubbled it above + jQuery.event.triggered = type; + try { + elem[ type ](); + } catch ( e ) { + // IE<9 dies on focus/blur to hidden element (#1486,#12518) + // only reproducible on winXP IE8 native, not IE9 in IE8 mode + } + jQuery.event.triggered = undefined; + + if ( tmp ) { + elem[ ontype ] = tmp; + } + } + } + } + + return event.result; + }, + + dispatch: function( event ) { + + // Make a writable jQuery.Event from the native event object + event = jQuery.event.fix( event ); + + var i, ret, handleObj, matched, j, + handlerQueue = [], + args = slice.call( arguments ), + handlers = ( jQuery._data( this, "events" ) || {} )[ event.type ] || [], + special = jQuery.event.special[ event.type ] || {}; + + // Use the fix-ed jQuery.Event rather than the (read-only) native event + args[0] = event; + event.delegateTarget = this; + + // Call the preDispatch hook for the mapped type, and let it bail if desired + if ( special.preDispatch && special.preDispatch.call( this, event ) === false ) { + return; + } + + // Determine handlers + handlerQueue = jQuery.event.handlers.call( this, event, handlers ); + + // Run delegates first; they may want to stop propagation beneath us + i = 0; + while ( (matched = handlerQueue[ i++ ]) && !event.isPropagationStopped() ) { + event.currentTarget = matched.elem; + + j = 0; + while ( (handleObj = matched.handlers[ j++ ]) && !event.isImmediatePropagationStopped() ) { + + // Triggered event must either 1) have no namespace, or + // 2) have namespace(s) a subset or equal to those in the bound event (both can have no namespace). + if ( !event.namespace_re || event.namespace_re.test( handleObj.namespace ) ) { + + event.handleObj = handleObj; + event.data = handleObj.data; + + ret = ( (jQuery.event.special[ handleObj.origType ] || {}).handle || handleObj.handler ) + .apply( matched.elem, args ); + + if ( ret !== undefined ) { + if ( (event.result = ret) === false ) { + event.preventDefault(); + event.stopPropagation(); + } + } + } + } + } + + // Call the postDispatch hook for the mapped type + if ( special.postDispatch ) { + special.postDispatch.call( this, event ); + } + + return event.result; + }, + + handlers: function( event, handlers ) { + var sel, handleObj, matches, i, + handlerQueue = [], + delegateCount = handlers.delegateCount, + cur = event.target; + + // Find delegate handlers + // Black-hole SVG instance trees (#13180) + // Avoid non-left-click bubbling in Firefox (#3861) + if ( delegateCount && cur.nodeType && (!event.button || event.type !== "click") ) { + + /* jshint eqeqeq: false */ + for ( ; cur != this; cur = cur.parentNode || this ) { + /* jshint eqeqeq: true */ + + // Don't check non-elements (#13208) + // Don't process clicks on disabled elements (#6911, #8165, #11382, #11764) + if ( cur.nodeType === 1 && (cur.disabled !== true || event.type !== "click") ) { + matches = []; + for ( i = 0; i < delegateCount; i++ ) { + handleObj = handlers[ i ]; + + // Don't conflict with Object.prototype properties (#13203) + sel = handleObj.selector + " "; + + if ( matches[ sel ] === undefined ) { + matches[ sel ] = handleObj.needsContext ? 
+ jQuery( sel, this ).index( cur ) >= 0 : + jQuery.find( sel, this, null, [ cur ] ).length; + } + if ( matches[ sel ] ) { + matches.push( handleObj ); + } + } + if ( matches.length ) { + handlerQueue.push({ elem: cur, handlers: matches }); + } + } + } + } + + // Add the remaining (directly-bound) handlers + if ( delegateCount < handlers.length ) { + handlerQueue.push({ elem: this, handlers: handlers.slice( delegateCount ) }); + } + + return handlerQueue; + }, + + fix: function( event ) { + if ( event[ jQuery.expando ] ) { + return event; + } + + // Create a writable copy of the event object and normalize some properties + var i, prop, copy, + type = event.type, + originalEvent = event, + fixHook = this.fixHooks[ type ]; + + if ( !fixHook ) { + this.fixHooks[ type ] = fixHook = + rmouseEvent.test( type ) ? this.mouseHooks : + rkeyEvent.test( type ) ? this.keyHooks : + {}; + } + copy = fixHook.props ? this.props.concat( fixHook.props ) : this.props; + + event = new jQuery.Event( originalEvent ); + + i = copy.length; + while ( i-- ) { + prop = copy[ i ]; + event[ prop ] = originalEvent[ prop ]; + } + + // Support: IE<9 + // Fix target property (#1925) + if ( !event.target ) { + event.target = originalEvent.srcElement || document; + } + + // Support: Chrome 23+, Safari? + // Target should not be a text node (#504, #13143) + if ( event.target.nodeType === 3 ) { + event.target = event.target.parentNode; + } + + // Support: IE<9 + // For mouse/key events, metaKey==false if it's undefined (#3368, #11328) + event.metaKey = !!event.metaKey; + + return fixHook.filter ? fixHook.filter( event, originalEvent ) : event; + }, + + // Includes some event props shared by KeyEvent and MouseEvent + props: "altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "), + + fixHooks: {}, + + keyHooks: { + props: "char charCode key keyCode".split(" "), + filter: function( event, original ) { + + // Add which for key events + if ( event.which == null ) { + event.which = original.charCode != null ? original.charCode : original.keyCode; + } + + return event; + } + }, + + mouseHooks: { + props: "button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "), + filter: function( event, original ) { + var body, eventDoc, doc, + button = original.button, + fromElement = original.fromElement; + + // Calculate pageX/Y if missing and clientX/Y available + if ( event.pageX == null && original.clientX != null ) { + eventDoc = event.target.ownerDocument || document; + doc = eventDoc.documentElement; + body = eventDoc.body; + + event.pageX = original.clientX + ( doc && doc.scrollLeft || body && body.scrollLeft || 0 ) - ( doc && doc.clientLeft || body && body.clientLeft || 0 ); + event.pageY = original.clientY + ( doc && doc.scrollTop || body && body.scrollTop || 0 ) - ( doc && doc.clientTop || body && body.clientTop || 0 ); + } + + // Add relatedTarget, if necessary + if ( !event.relatedTarget && fromElement ) { + event.relatedTarget = fromElement === event.target ? original.toElement : fromElement; + } + + // Add which for click: 1 === left; 2 === middle; 3 === right + // Note: button is not normalized, so don't use it + if ( !event.which && button !== undefined ) { + event.which = ( button & 1 ? 1 : ( button & 2 ? 3 : ( button & 4 ? 
2 : 0 ) ) ); + } + + return event; + } + }, + + special: { + load: { + // Prevent triggered image.load events from bubbling to window.load + noBubble: true + }, + focus: { + // Fire native event if possible so blur/focus sequence is correct + trigger: function() { + if ( this !== safeActiveElement() && this.focus ) { + try { + this.focus(); + return false; + } catch ( e ) { + // Support: IE<9 + // If we error on focus to hidden element (#1486, #12518), + // let .trigger() run the handlers + } + } + }, + delegateType: "focusin" + }, + blur: { + trigger: function() { + if ( this === safeActiveElement() && this.blur ) { + this.blur(); + return false; + } + }, + delegateType: "focusout" + }, + click: { + // For checkbox, fire native event so checked state will be right + trigger: function() { + if ( jQuery.nodeName( this, "input" ) && this.type === "checkbox" && this.click ) { + this.click(); + return false; + } + }, + + // For cross-browser consistency, don't fire native .click() on links + _default: function( event ) { + return jQuery.nodeName( event.target, "a" ); + } + }, + + beforeunload: { + postDispatch: function( event ) { + + // Support: Firefox 20+ + // Firefox doesn't alert if the returnValue field is not set. + if ( event.result !== undefined && event.originalEvent ) { + event.originalEvent.returnValue = event.result; + } + } + } + }, + + simulate: function( type, elem, event, bubble ) { + // Piggyback on a donor event to simulate a different one. + // Fake originalEvent to avoid donor's stopPropagation, but if the + // simulated event prevents default then we do the same on the donor. + var e = jQuery.extend( + new jQuery.Event(), + event, + { + type: type, + isSimulated: true, + originalEvent: {} + } + ); + if ( bubble ) { + jQuery.event.trigger( e, null, elem ); + } else { + jQuery.event.dispatch.call( elem, e ); + } + if ( e.isDefaultPrevented() ) { + event.preventDefault(); + } + } +}; + +jQuery.removeEvent = document.removeEventListener ? + function( elem, type, handle ) { + if ( elem.removeEventListener ) { + elem.removeEventListener( type, handle, false ); + } + } : + function( elem, type, handle ) { + var name = "on" + type; + + if ( elem.detachEvent ) { + + // #8545, #7054, preventing memory leaks for custom events in IE6-8 + // detachEvent needed property on element, by name of that event, to properly expose it to GC + if ( typeof elem[ name ] === strundefined ) { + elem[ name ] = null; + } + + elem.detachEvent( name, handle ); + } + }; + +jQuery.Event = function( src, props ) { + // Allow instantiation without the 'new' keyword + if ( !(this instanceof jQuery.Event) ) { + return new jQuery.Event( src, props ); + } + + // Event object + if ( src && src.type ) { + this.originalEvent = src; + this.type = src.type; + + // Events bubbling up the document may have been marked as prevented + // by a handler lower down the tree; reflect the correct value. + this.isDefaultPrevented = src.defaultPrevented || + src.defaultPrevented === undefined && + // Support: IE < 9, Android < 4.0 + src.returnValue === false ? 
+ returnTrue : + returnFalse; + + // Event type + } else { + this.type = src; + } + + // Put explicitly provided properties onto the event object + if ( props ) { + jQuery.extend( this, props ); + } + + // Create a timestamp if incoming event doesn't have one + this.timeStamp = src && src.timeStamp || jQuery.now(); + + // Mark it as fixed + this[ jQuery.expando ] = true; +}; + +// jQuery.Event is based on DOM3 Events as specified by the ECMAScript Language Binding +// http://www.w3.org/TR/2003/WD-DOM-Level-3-Events-20030331/ecma-script-binding.html +jQuery.Event.prototype = { + isDefaultPrevented: returnFalse, + isPropagationStopped: returnFalse, + isImmediatePropagationStopped: returnFalse, + + preventDefault: function() { + var e = this.originalEvent; + + this.isDefaultPrevented = returnTrue; + if ( !e ) { + return; + } + + // If preventDefault exists, run it on the original event + if ( e.preventDefault ) { + e.preventDefault(); + + // Support: IE + // Otherwise set the returnValue property of the original event to false + } else { + e.returnValue = false; + } + }, + stopPropagation: function() { + var e = this.originalEvent; + + this.isPropagationStopped = returnTrue; + if ( !e ) { + return; + } + // If stopPropagation exists, run it on the original event + if ( e.stopPropagation ) { + e.stopPropagation(); + } + + // Support: IE + // Set the cancelBubble property of the original event to true + e.cancelBubble = true; + }, + stopImmediatePropagation: function() { + var e = this.originalEvent; + + this.isImmediatePropagationStopped = returnTrue; + + if ( e && e.stopImmediatePropagation ) { + e.stopImmediatePropagation(); + } + + this.stopPropagation(); + } +}; + +// Create mouseenter/leave events using mouseover/out and event-time checks +jQuery.each({ + mouseenter: "mouseover", + mouseleave: "mouseout", + pointerenter: "pointerover", + pointerleave: "pointerout" +}, function( orig, fix ) { + jQuery.event.special[ orig ] = { + delegateType: fix, + bindType: fix, + + handle: function( event ) { + var ret, + target = this, + related = event.relatedTarget, + handleObj = event.handleObj; + + // For mousenter/leave call the handler if related is outside the target. + // NB: No relatedTarget if the mouse left/entered the browser window + if ( !related || (related !== target && !jQuery.contains( target, related )) ) { + event.type = handleObj.origType; + ret = handleObj.handler.apply( this, arguments ); + event.type = fix; + } + return ret; + } + }; +}); + +// IE submit delegation +if ( !support.submitBubbles ) { + + jQuery.event.special.submit = { + setup: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Lazy-add a submit handler when a descendant form may potentially be submitted + jQuery.event.add( this, "click._submit keypress._submit", function( e ) { + // Node name check avoids a VML-related crash in IE (#9807) + var elem = e.target, + form = jQuery.nodeName( elem, "input" ) || jQuery.nodeName( elem, "button" ) ? 
elem.form : undefined; + if ( form && !jQuery._data( form, "submitBubbles" ) ) { + jQuery.event.add( form, "submit._submit", function( event ) { + event._submit_bubble = true; + }); + jQuery._data( form, "submitBubbles", true ); + } + }); + // return undefined since we don't need an event listener + }, + + postDispatch: function( event ) { + // If form was submitted by the user, bubble the event up the tree + if ( event._submit_bubble ) { + delete event._submit_bubble; + if ( this.parentNode && !event.isTrigger ) { + jQuery.event.simulate( "submit", this.parentNode, event, true ); + } + } + }, + + teardown: function() { + // Only need this for delegated form submit events + if ( jQuery.nodeName( this, "form" ) ) { + return false; + } + + // Remove delegated handlers; cleanData eventually reaps submit handlers attached above + jQuery.event.remove( this, "._submit" ); + } + }; +} + +// IE change delegation and checkbox/radio fix +if ( !support.changeBubbles ) { + + jQuery.event.special.change = { + + setup: function() { + + if ( rformElems.test( this.nodeName ) ) { + // IE doesn't fire change on a check/radio until blur; trigger it on click + // after a propertychange. Eat the blur-change in special.change.handle. + // This still fires onchange a second time for check/radio after blur. + if ( this.type === "checkbox" || this.type === "radio" ) { + jQuery.event.add( this, "propertychange._change", function( event ) { + if ( event.originalEvent.propertyName === "checked" ) { + this._just_changed = true; + } + }); + jQuery.event.add( this, "click._change", function( event ) { + if ( this._just_changed && !event.isTrigger ) { + this._just_changed = false; + } + // Allow triggered, simulated change events (#11500) + jQuery.event.simulate( "change", this, event, true ); + }); + } + return false; + } + // Delegated event; lazy-add a change handler on descendant inputs + jQuery.event.add( this, "beforeactivate._change", function( e ) { + var elem = e.target; + + if ( rformElems.test( elem.nodeName ) && !jQuery._data( elem, "changeBubbles" ) ) { + jQuery.event.add( elem, "change._change", function( event ) { + if ( this.parentNode && !event.isSimulated && !event.isTrigger ) { + jQuery.event.simulate( "change", this.parentNode, event, true ); + } + }); + jQuery._data( elem, "changeBubbles", true ); + } + }); + }, + + handle: function( event ) { + var elem = event.target; + + // Swallow native change events from checkbox/radio, we already triggered them above + if ( this !== elem || event.isSimulated || event.isTrigger || (elem.type !== "radio" && elem.type !== "checkbox") ) { + return event.handleObj.handler.apply( this, arguments ); + } + }, + + teardown: function() { + jQuery.event.remove( this, "._change" ); + + return !rformElems.test( this.nodeName ); + } + }; +} + +// Create "bubbling" focus and blur events +if ( !support.focusinBubbles ) { + jQuery.each({ focus: "focusin", blur: "focusout" }, function( orig, fix ) { + + // Attach a single capturing handler on the document while someone wants focusin/focusout + var handler = function( event ) { + jQuery.event.simulate( fix, event.target, jQuery.event.fix( event ), true ); + }; + + jQuery.event.special[ fix ] = { + setup: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( doc, fix ); + + if ( !attaches ) { + doc.addEventListener( orig, handler, true ); + } + jQuery._data( doc, fix, ( attaches || 0 ) + 1 ); + }, + teardown: function() { + var doc = this.ownerDocument || this, + attaches = jQuery._data( doc, fix ) - 1; 
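+				// attaches acts as a reference count: the capturing document
+				// listener is only removed once the last focusin/focusout
+				// binding for this document has been torn down.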
+
+				if ( !attaches ) {
+					doc.removeEventListener( orig, handler, true );
+					jQuery._removeData( doc, fix );
+				} else {
+					jQuery._data( doc, fix, attaches );
+				}
+			}
+		};
+	});
+}
+
+jQuery.fn.extend({
+
+	on: function( types, selector, data, fn, /*INTERNAL*/ one ) {
+		var type, origFn;
+
+		// Types can be a map of types/handlers
+		if ( typeof types === "object" ) {
+			// ( types-Object, selector, data )
+			if ( typeof selector !== "string" ) {
+				// ( types-Object, data )
+				data = data || selector;
+				selector = undefined;
+			}
+			for ( type in types ) {
+				this.on( type, selector, data, types[ type ], one );
+			}
+			return this;
+		}
+
+		if ( data == null && fn == null ) {
+			// ( types, fn )
+			fn = selector;
+			data = selector = undefined;
+		} else if ( fn == null ) {
+			if ( typeof selector === "string" ) {
+				// ( types, selector, fn )
+				fn = data;
+				data = undefined;
+			} else {
+				// ( types, data, fn )
+				fn = data;
+				data = selector;
+				selector = undefined;
+			}
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		} else if ( !fn ) {
+			return this;
+		}
+
+		if ( one === 1 ) {
+			origFn = fn;
+			fn = function( event ) {
+				// Can use an empty set, since event contains the info
+				jQuery().off( event );
+				return origFn.apply( this, arguments );
+			};
+			// Use same guid so caller can remove using origFn
+			fn.guid = origFn.guid || ( origFn.guid = jQuery.guid++ );
+		}
+		return this.each( function() {
+			jQuery.event.add( this, types, fn, data, selector );
+		});
+	},
+	one: function( types, selector, data, fn ) {
+		return this.on( types, selector, data, fn, 1 );
+	},
+	off: function( types, selector, fn ) {
+		var handleObj, type;
+		if ( types && types.preventDefault && types.handleObj ) {
+			// ( event )  dispatched jQuery.Event
+			handleObj = types.handleObj;
+			jQuery( types.delegateTarget ).off(
+				handleObj.namespace ? handleObj.origType + "." + handleObj.namespace : handleObj.origType,
+				handleObj.selector,
+				handleObj.handler
+			);
+			return this;
+		}
+		if ( typeof types === "object" ) {
+			// ( types-object [, selector] )
+			for ( type in types ) {
+				this.off( type, selector, types[ type ] );
+			}
+			return this;
+		}
+		if ( selector === false || typeof selector === "function" ) {
+			// ( types [, fn] )
+			fn = selector;
+			selector = undefined;
+		}
+		if ( fn === false ) {
+			fn = returnFalse;
+		}
+		return this.each(function() {
+			jQuery.event.remove( this, types, fn, selector );
+		});
+	},
+
+	trigger: function( type, data ) {
+		return this.each(function() {
+			jQuery.event.trigger( type, data, this );
+		});
+	},
+	triggerHandler: function( type, data ) {
+		var elem = this[0];
+		if ( elem ) {
+			return jQuery.event.trigger( type, data, elem, true );
+		}
+	}
+});
+
+
+function createSafeFragment( document ) {
+	var list = nodeNames.split( "|" ),
+		safeFrag = document.createDocumentFragment();
+
+	if ( safeFrag.createElement ) {
+		while ( list.length ) {
+			safeFrag.createElement(
+				list.pop()
+			);
+		}
+	}
+	return safeFrag;
+}
+
+var nodeNames = "abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|" +
+		"header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",
+	rinlinejQuery = / jQuery\d+="(?:null|\d+)"/g,
+	rnoshimcache = new RegExp("<(?:" + nodeNames + ")[\\s/>]", "i"),
+	rleadingWhitespace = /^\s+/,
+	rxhtmlTag = /<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,
+	rtagName = /<([\w:]+)/,
+	rtbody = /<tbody/i,
+	rhtml = /<|&#?\w+;/,
+	rnoInnerhtml = /<(?:script|style|link)/i,
+	// checked="checked" or checked
+	rchecked = /checked\s*(?:[^=]|=\s*.checked.)/i,
+	rscriptType = /^$|\/(?:java|ecma)script/i,
+	rscriptTypeMasked = /^true\/(.*)/,
+	rcleanScript = /^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,
+
+	// We have to close these tags to support XHTML (#13200)
+	wrapMap = {
+		option: [ 1, "<select multiple='multiple'>", "</select>" ],
+		legend: [ 1, "<fieldset>", "</fieldset>" ],
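+		// (each entry is [ unwrap depth, opening wrapper, closing wrapper ];
+		// buildFragment below steps lastChild `depth` times to unwrap)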
", "
" ], + area: [ 1, "", "" ], + param: [ 1, "", "" ], + thead: [ 1, "", "
" ], + tr: [ 2, "", "
" ], + col: [ 2, "", "
" ], + td: [ 3, "", "
" ], + + // IE6-8 can't serialize link, script, style, or any html5 (NoScope) tags, + // unless wrapped in a div with non-breaking characters in front of it. + _default: support.htmlSerialize ? [ 0, "", "" ] : [ 1, "X
", "
" ] + }, + safeFragment = createSafeFragment( document ), + fragmentDiv = safeFragment.appendChild( document.createElement("div") ); + +wrapMap.optgroup = wrapMap.option; +wrapMap.tbody = wrapMap.tfoot = wrapMap.colgroup = wrapMap.caption = wrapMap.thead; +wrapMap.th = wrapMap.td; + +function getAll( context, tag ) { + var elems, elem, + i = 0, + found = typeof context.getElementsByTagName !== strundefined ? context.getElementsByTagName( tag || "*" ) : + typeof context.querySelectorAll !== strundefined ? context.querySelectorAll( tag || "*" ) : + undefined; + + if ( !found ) { + for ( found = [], elems = context.childNodes || context; (elem = elems[i]) != null; i++ ) { + if ( !tag || jQuery.nodeName( elem, tag ) ) { + found.push( elem ); + } else { + jQuery.merge( found, getAll( elem, tag ) ); + } + } + } + + return tag === undefined || tag && jQuery.nodeName( context, tag ) ? + jQuery.merge( [ context ], found ) : + found; +} + +// Used in buildFragment, fixes the defaultChecked property +function fixDefaultChecked( elem ) { + if ( rcheckableType.test( elem.type ) ) { + elem.defaultChecked = elem.checked; + } +} + +// Support: IE<8 +// Manipulating tables requires a tbody +function manipulationTarget( elem, content ) { + return jQuery.nodeName( elem, "table" ) && + jQuery.nodeName( content.nodeType !== 11 ? content : content.firstChild, "tr" ) ? + + elem.getElementsByTagName("tbody")[0] || + elem.appendChild( elem.ownerDocument.createElement("tbody") ) : + elem; +} + +// Replace/restore the type attribute of script elements for safe DOM manipulation +function disableScript( elem ) { + elem.type = (jQuery.find.attr( elem, "type" ) !== null) + "/" + elem.type; + return elem; +} +function restoreScript( elem ) { + var match = rscriptTypeMasked.exec( elem.type ); + if ( match ) { + elem.type = match[1]; + } else { + elem.removeAttribute("type"); + } + return elem; +} + +// Mark scripts as having already been evaluated +function setGlobalEval( elems, refElements ) { + var elem, + i = 0; + for ( ; (elem = elems[i]) != null; i++ ) { + jQuery._data( elem, "globalEval", !refElements || jQuery._data( refElements[i], "globalEval" ) ); + } +} + +function cloneCopyEvent( src, dest ) { + + if ( dest.nodeType !== 1 || !jQuery.hasData( src ) ) { + return; + } + + var type, i, l, + oldData = jQuery._data( src ), + curData = jQuery._data( dest, oldData ), + events = oldData.events; + + if ( events ) { + delete curData.handle; + curData.events = {}; + + for ( type in events ) { + for ( i = 0, l = events[ type ].length; i < l; i++ ) { + jQuery.event.add( dest, type, events[ type ][ i ] ); + } + } + } + + // make the cloned public data object a copy from the original + if ( curData.data ) { + curData.data = jQuery.extend( {}, curData.data ); + } +} + +function fixCloneNodeIssues( src, dest ) { + var nodeName, e, data; + + // We do not need to do anything for non-Elements + if ( dest.nodeType !== 1 ) { + return; + } + + nodeName = dest.nodeName.toLowerCase(); + + // IE6-8 copies events bound via attachEvent when using cloneNode. 
+ if ( !support.noCloneEvent && dest[ jQuery.expando ] ) { + data = jQuery._data( dest ); + + for ( e in data.events ) { + jQuery.removeEvent( dest, e, data.handle ); + } + + // Event data gets referenced instead of copied if the expando gets copied too + dest.removeAttribute( jQuery.expando ); + } + + // IE blanks contents when cloning scripts, and tries to evaluate newly-set text + if ( nodeName === "script" && dest.text !== src.text ) { + disableScript( dest ).text = src.text; + restoreScript( dest ); + + // IE6-10 improperly clones children of object elements using classid. + // IE10 throws NoModificationAllowedError if parent is null, #12132. + } else if ( nodeName === "object" ) { + if ( dest.parentNode ) { + dest.outerHTML = src.outerHTML; + } + + // This path appears unavoidable for IE9. When cloning an object + // element in IE9, the outerHTML strategy above is not sufficient. + // If the src has innerHTML and the destination does not, + // copy the src.innerHTML into the dest.innerHTML. #10324 + if ( support.html5Clone && ( src.innerHTML && !jQuery.trim(dest.innerHTML) ) ) { + dest.innerHTML = src.innerHTML; + } + + } else if ( nodeName === "input" && rcheckableType.test( src.type ) ) { + // IE6-8 fails to persist the checked state of a cloned checkbox + // or radio button. Worse, IE6-7 fail to give the cloned element + // a checked appearance if the defaultChecked value isn't also set + + dest.defaultChecked = dest.checked = src.checked; + + // IE6-7 get confused and end up setting the value of a cloned + // checkbox/radio button to an empty string instead of "on" + if ( dest.value !== src.value ) { + dest.value = src.value; + } + + // IE6-8 fails to return the selected option to the default selected + // state when cloning options + } else if ( nodeName === "option" ) { + dest.defaultSelected = dest.selected = src.defaultSelected; + + // IE6-8 fails to set the defaultValue to the correct value when + // cloning other types of input fields + } else if ( nodeName === "input" || nodeName === "textarea" ) { + dest.defaultValue = src.defaultValue; + } +} + +jQuery.extend({ + clone: function( elem, dataAndEvents, deepDataAndEvents ) { + var destElements, node, clone, i, srcElements, + inPage = jQuery.contains( elem.ownerDocument, elem ); + + if ( support.html5Clone || jQuery.isXMLDoc(elem) || !rnoshimcache.test( "<" + elem.nodeName + ">" ) ) { + clone = elem.cloneNode( true ); + + // IE<=8 does not properly clone detached, unknown element nodes + } else { + fragmentDiv.innerHTML = elem.outerHTML; + fragmentDiv.removeChild( clone = fragmentDiv.firstChild ); + } + + if ( (!support.noCloneEvent || !support.noCloneChecked) && + (elem.nodeType === 1 || elem.nodeType === 11) && !jQuery.isXMLDoc(elem) ) { + + // We eschew Sizzle here for performance reasons: http://jsperf.com/getall-vs-sizzle/2 + destElements = getAll( clone ); + srcElements = getAll( elem ); + + // Fix all IE cloning issues + for ( i = 0; (node = srcElements[i]) != null; ++i ) { + // Ensure that the destination node is not null; Fixes #9587 + if ( destElements[i] ) { + fixCloneNodeIssues( node, destElements[i] ); + } + } + } + + // Copy the events from the original to the clone + if ( dataAndEvents ) { + if ( deepDataAndEvents ) { + srcElements = srcElements || getAll( elem ); + destElements = destElements || getAll( clone ); + + for ( i = 0; (node = srcElements[i]) != null; i++ ) { + cloneCopyEvent( node, destElements[i] ); + } + } else { + cloneCopyEvent( elem, clone ); + } + } + + // Preserve script evaluation history + 
destElements = getAll( clone, "script" ); + if ( destElements.length > 0 ) { + setGlobalEval( destElements, !inPage && getAll( elem, "script" ) ); + } + + destElements = srcElements = node = null; + + // Return the cloned set + return clone; + }, + + buildFragment: function( elems, context, scripts, selection ) { + var j, elem, contains, + tmp, tag, tbody, wrap, + l = elems.length, + + // Ensure a safe fragment + safe = createSafeFragment( context ), + + nodes = [], + i = 0; + + for ( ; i < l; i++ ) { + elem = elems[ i ]; + + if ( elem || elem === 0 ) { + + // Add nodes directly + if ( jQuery.type( elem ) === "object" ) { + jQuery.merge( nodes, elem.nodeType ? [ elem ] : elem ); + + // Convert non-html into a text node + } else if ( !rhtml.test( elem ) ) { + nodes.push( context.createTextNode( elem ) ); + + // Convert html into DOM nodes + } else { + tmp = tmp || safe.appendChild( context.createElement("div") ); + + // Deserialize a standard representation + tag = (rtagName.exec( elem ) || [ "", "" ])[ 1 ].toLowerCase(); + wrap = wrapMap[ tag ] || wrapMap._default; + + tmp.innerHTML = wrap[1] + elem.replace( rxhtmlTag, "<$1>" ) + wrap[2]; + + // Descend through wrappers to the right content + j = wrap[0]; + while ( j-- ) { + tmp = tmp.lastChild; + } + + // Manually add leading whitespace removed by IE + if ( !support.leadingWhitespace && rleadingWhitespace.test( elem ) ) { + nodes.push( context.createTextNode( rleadingWhitespace.exec( elem )[0] ) ); + } + + // Remove IE's autoinserted from table fragments + if ( !support.tbody ) { + + // String was a , *may* have spurious + elem = tag === "table" && !rtbody.test( elem ) ? + tmp.firstChild : + + // String was a bare or + wrap[1] === "
" && !rtbody.test( elem ) ? + tmp : + 0; + + j = elem && elem.childNodes.length; + while ( j-- ) { + if ( jQuery.nodeName( (tbody = elem.childNodes[j]), "tbody" ) && !tbody.childNodes.length ) { + elem.removeChild( tbody ); + } + } + } + + jQuery.merge( nodes, tmp.childNodes ); + + // Fix #12392 for WebKit and IE > 9 + tmp.textContent = ""; + + // Fix #12392 for oldIE + while ( tmp.firstChild ) { + tmp.removeChild( tmp.firstChild ); + } + + // Remember the top-level container for proper cleanup + tmp = safe.lastChild; + } + } + } + + // Fix #11356: Clear elements from fragment + if ( tmp ) { + safe.removeChild( tmp ); + } + + // Reset defaultChecked for any radios and checkboxes + // about to be appended to the DOM in IE 6/7 (#8060) + if ( !support.appendChecked ) { + jQuery.grep( getAll( nodes, "input" ), fixDefaultChecked ); + } + + i = 0; + while ( (elem = nodes[ i++ ]) ) { + + // #4087 - If origin and destination elements are the same, and this is + // that element, do not do anything + if ( selection && jQuery.inArray( elem, selection ) !== -1 ) { + continue; + } + + contains = jQuery.contains( elem.ownerDocument, elem ); + + // Append to fragment + tmp = getAll( safe.appendChild( elem ), "script" ); + + // Preserve script evaluation history + if ( contains ) { + setGlobalEval( tmp ); + } + + // Capture executables + if ( scripts ) { + j = 0; + while ( (elem = tmp[ j++ ]) ) { + if ( rscriptType.test( elem.type || "" ) ) { + scripts.push( elem ); + } + } + } + } + + tmp = null; + + return safe; + }, + + cleanData: function( elems, /* internal */ acceptData ) { + var elem, type, id, data, + i = 0, + internalKey = jQuery.expando, + cache = jQuery.cache, + deleteExpando = support.deleteExpando, + special = jQuery.event.special; + + for ( ; (elem = elems[i]) != null; i++ ) { + if ( acceptData || jQuery.acceptData( elem ) ) { + + id = elem[ internalKey ]; + data = id && cache[ id ]; + + if ( data ) { + if ( data.events ) { + for ( type in data.events ) { + if ( special[ type ] ) { + jQuery.event.remove( elem, type ); + + // This is a shortcut to avoid jQuery.event.remove's overhead + } else { + jQuery.removeEvent( elem, type, data.handle ); + } + } + } + + // Remove cache only if it was not already removed by jQuery.event.remove + if ( cache[ id ] ) { + + delete cache[ id ]; + + // IE does not allow us to delete expando properties from nodes, + // nor does it have a removeAttribute function on Document nodes; + // we must handle all of these cases + if ( deleteExpando ) { + delete elem[ internalKey ]; + + } else if ( typeof elem.removeAttribute !== strundefined ) { + elem.removeAttribute( internalKey ); + + } else { + elem[ internalKey ] = null; + } + + deletedIds.push( id ); + } + } + } + } + } +}); + +jQuery.fn.extend({ + text: function( value ) { + return access( this, function( value ) { + return value === undefined ? 
+ jQuery.text( this ) : + this.empty().append( ( this[0] && this[0].ownerDocument || document ).createTextNode( value ) ); + }, null, value, arguments.length ); + }, + + append: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.appendChild( elem ); + } + }); + }, + + prepend: function() { + return this.domManip( arguments, function( elem ) { + if ( this.nodeType === 1 || this.nodeType === 11 || this.nodeType === 9 ) { + var target = manipulationTarget( this, elem ); + target.insertBefore( elem, target.firstChild ); + } + }); + }, + + before: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this ); + } + }); + }, + + after: function() { + return this.domManip( arguments, function( elem ) { + if ( this.parentNode ) { + this.parentNode.insertBefore( elem, this.nextSibling ); + } + }); + }, + + remove: function( selector, keepData /* Internal Use Only */ ) { + var elem, + elems = selector ? jQuery.filter( selector, this ) : this, + i = 0; + + for ( ; (elem = elems[i]) != null; i++ ) { + + if ( !keepData && elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem ) ); + } + + if ( elem.parentNode ) { + if ( keepData && jQuery.contains( elem.ownerDocument, elem ) ) { + setGlobalEval( getAll( elem, "script" ) ); + } + elem.parentNode.removeChild( elem ); + } + } + + return this; + }, + + empty: function() { + var elem, + i = 0; + + for ( ; (elem = this[i]) != null; i++ ) { + // Remove element nodes and prevent memory leaks + if ( elem.nodeType === 1 ) { + jQuery.cleanData( getAll( elem, false ) ); + } + + // Remove any remaining nodes + while ( elem.firstChild ) { + elem.removeChild( elem.firstChild ); + } + + // If this is a select, ensure that it displays empty (#12336) + // Support: IE<9 + if ( elem.options && jQuery.nodeName( elem, "select" ) ) { + elem.options.length = 0; + } + } + + return this; + }, + + clone: function( dataAndEvents, deepDataAndEvents ) { + dataAndEvents = dataAndEvents == null ? false : dataAndEvents; + deepDataAndEvents = deepDataAndEvents == null ? dataAndEvents : deepDataAndEvents; + + return this.map(function() { + return jQuery.clone( this, dataAndEvents, deepDataAndEvents ); + }); + }, + + html: function( value ) { + return access( this, function( value ) { + var elem = this[ 0 ] || {}, + i = 0, + l = this.length; + + if ( value === undefined ) { + return elem.nodeType === 1 ? 
+				elem.innerHTML.replace( rinlinejQuery, "" ) :
+				undefined;
+			}
+
+			// See if we can take a shortcut and just use innerHTML
+			if ( typeof value === "string" && !rnoInnerhtml.test( value ) &&
+				( support.htmlSerialize || !rnoshimcache.test( value ) ) &&
+				( support.leadingWhitespace || !rleadingWhitespace.test( value ) ) &&
+				!wrapMap[ (rtagName.exec( value ) || [ "", "" ])[ 1 ].toLowerCase() ] ) {
+
+				value = value.replace( rxhtmlTag, "<$1></$2>" );
+
+				try {
+					for (; i < l; i++ ) {
+						// Remove element nodes and prevent memory leaks
+						elem = this[i] || {};
+						if ( elem.nodeType === 1 ) {
+							jQuery.cleanData( getAll( elem, false ) );
+							elem.innerHTML = value;
+						}
+					}
+
+					elem = 0;
+
+				// If using innerHTML throws an exception, use the fallback method
+				} catch(e) {}
+			}
+
+			if ( elem ) {
+				this.empty().append( value );
+			}
+		}, null, value, arguments.length );
+	},
+
+	replaceWith: function() {
+		var arg = arguments[ 0 ];
+
+		// Make the changes, replacing each context element with the new content
+		this.domManip( arguments, function( elem ) {
+			arg = this.parentNode;
+
+			jQuery.cleanData( getAll( this ) );
+
+			if ( arg ) {
+				arg.replaceChild( elem, this );
+			}
+		});
+
+		// Force removal if there was no new content (e.g., from empty arguments)
+		return arg && (arg.length || arg.nodeType) ? this : this.remove();
+	},
+
+	detach: function( selector ) {
+		return this.remove( selector, true );
+	},
+
+	domManip: function( args, callback ) {
+
+		// Flatten any nested arrays
+		args = concat.apply( [], args );
+
+		var first, node, hasScripts,
+			scripts, doc, fragment,
+			i = 0,
+			l = this.length,
+			set = this,
+			iNoClone = l - 1,
+			value = args[0],
+			isFunction = jQuery.isFunction( value );
+
+		// We can't cloneNode fragments that contain checked, in WebKit
+		if ( isFunction ||
+				( l > 1 && typeof value === "string" &&
+					!support.checkClone && rchecked.test( value ) ) ) {
+			return this.each(function( index ) {
+				var self = set.eq( index );
+				if ( isFunction ) {
+					args[0] = value.call( this, index, self.html() );
+				}
+				self.domManip( args, callback );
+			});
+		}
+
+		if ( l ) {
+			fragment = jQuery.buildFragment( args, this[ 0 ].ownerDocument, false, this );
+			first = fragment.firstChild;
+
+			if ( fragment.childNodes.length === 1 ) {
+				fragment = first;
+			}
+
+			if ( first ) {
+				scripts = jQuery.map( getAll( fragment, "script" ), disableScript );
+				hasScripts = scripts.length;
+
+				// Use the original fragment for the last item instead of the first because it can end up
+				// being emptied incorrectly in certain situations (#8070).
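+				// e.g. appending one fragment to three targets: the first two get
+				// deep clones (i !== iNoClone) and the last receives the original.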
+ for ( ; i < l; i++ ) { + node = fragment; + + if ( i !== iNoClone ) { + node = jQuery.clone( node, true, true ); + + // Keep references to cloned scripts for later restoration + if ( hasScripts ) { + jQuery.merge( scripts, getAll( node, "script" ) ); + } + } + + callback.call( this[i], node, i ); + } + + if ( hasScripts ) { + doc = scripts[ scripts.length - 1 ].ownerDocument; + + // Reenable scripts + jQuery.map( scripts, restoreScript ); + + // Evaluate executable scripts on first document insertion + for ( i = 0; i < hasScripts; i++ ) { + node = scripts[ i ]; + if ( rscriptType.test( node.type || "" ) && + !jQuery._data( node, "globalEval" ) && jQuery.contains( doc, node ) ) { + + if ( node.src ) { + // Optional AJAX dependency, but won't run scripts if not present + if ( jQuery._evalUrl ) { + jQuery._evalUrl( node.src ); + } + } else { + jQuery.globalEval( ( node.text || node.textContent || node.innerHTML || "" ).replace( rcleanScript, "" ) ); + } + } + } + } + + // Fix #11809: Avoid leaking memory + fragment = first = null; + } + } + + return this; + } +}); + +jQuery.each({ + appendTo: "append", + prependTo: "prepend", + insertBefore: "before", + insertAfter: "after", + replaceAll: "replaceWith" +}, function( name, original ) { + jQuery.fn[ name ] = function( selector ) { + var elems, + i = 0, + ret = [], + insert = jQuery( selector ), + last = insert.length - 1; + + for ( ; i <= last; i++ ) { + elems = i === last ? this : this.clone(true); + jQuery( insert[i] )[ original ]( elems ); + + // Modern browsers can apply jQuery collections as arrays, but oldIE needs a .get() + push.apply( ret, elems.get() ); + } + + return this.pushStack( ret ); + }; +}); + + +var iframe, + elemdisplay = {}; + +/** + * Retrieve the actual display of a element + * @param {String} name nodeName of the element + * @param {Object} doc Document object + */ +// Called only from within defaultDisplay +function actualDisplay( name, doc ) { + var style, + elem = jQuery( doc.createElement( name ) ).appendTo( doc.body ), + + // getDefaultComputedStyle might be reliably used only on attached element + display = window.getDefaultComputedStyle && ( style = window.getDefaultComputedStyle( elem[ 0 ] ) ) ? + + // Use of this method is a temporary fix (more like optmization) until something better comes along, + // since it was removed from specification and supported only in FF + style.display : jQuery.css( elem[ 0 ], "display" ); + + // We don't have any data stored on the element, + // so use "detach" method as fast way to get rid of the element + elem.detach(); + + return display; +} + +/** + * Try to determine the default display value of an element + * @param {String} nodeName + */ +function defaultDisplay( nodeName ) { + var doc = document, + display = elemdisplay[ nodeName ]; + + if ( !display ) { + display = actualDisplay( nodeName, doc ); + + // If the simple way fails, read from inside an iframe + if ( display === "none" || !display ) { + + // Use the already-created iframe if possible + iframe = (iframe || jQuery( "