Commit

Removal of unnecessary lists added by the 2to3 tool.
gavinevans committed May 30, 2018
1 parent d36d57a commit df46464
Showing 30 changed files with 89 additions and 91 deletions.
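
The pattern is the same throughout the diff: 2to3 conservatively wraps dict views and iterators in list() because Python 2's dict.keys() and zip() returned real lists, but nearly every call site here iterates the result only once, so the extra copy is wasted work. A minimal sketch of the before-and-after, using placeholder data rather than code from this repository:

    diagnostics = {"temperature": "temp.nc", "rainfall": "rain.nc"}

    # 2to3 output: copies every key into a new list before iterating.
    for key in list(diagnostics.keys()):
        print(key)

    # Idiomatic Python 3: iterate the dict view directly; no copy is made.
    for key in diagnostics:
        print(key)

    # The copy is still required when the dict is mutated during iteration:
    for key in list(diagnostics.keys()):
        if key == "rainfall":
            del diagnostics[key]  # would raise RuntimeError without the copy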
2 changes: 1 addition & 1 deletion bin/improver-spot-extract
@@ -233,7 +233,7 @@ def main():

# Loop through the requested diagnostics and load the essential data
# and any additional data that is required.
for diagnostic_key in list(diagnostics.keys()):
for diagnostic_key in diagnostics:
diagnostic_name_in_filename = (
diagnostics[diagnostic_key]["name_in_filename"])
files_to_read = (
2 changes: 1 addition & 1 deletion bin/improver-threshold
@@ -115,7 +115,7 @@ def main():
thresholds_from_file = json.load(input_file)
thresholds = []
fuzzy_bounds = []
for key in list(thresholds_from_file.keys()):
for key in thresholds_from_file:
thresholds.append(float(key))
fuzzy_bounds.append(tuple(thresholds_from_file[key]))
except ValueError as err:
4 changes: 2 additions & 2 deletions lib/improver/ensemble_calibration/ensemble_calibration.py
@@ -876,7 +876,7 @@ def _apply_params(

# If the coefficients are not available for the date, use the
# raw ensemble forecast as the calibrated ensemble forecast.
if date not in list(optimised_coeffs.keys()):
if date not in optimised_coeffs:
msg = ("Ensemble calibration not available "
"for forecasts with start time of {}. "
"Coefficients not available".format(
@@ -895,7 +895,7 @@ def _apply_params(
# Assigning coefficients to coefficient names.
if len(optimised_coeffs_at_date) == len(coeff_names):
optimised_coeffs_at_date = dict(
list(zip(coeff_names, optimised_coeffs_at_date)))
zip(coeff_names, optimised_coeffs_at_date))
elif len(optimised_coeffs_at_date) > len(coeff_names):
excess_beta = (
optimised_coeffs_at_date[len(coeff_names):].tolist())
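
As an aside on the dict(zip(...)) change above: Python 3's zip returns a lazy iterator, and dict() consumes an iterator just as readily as a list, so the wrapper added nothing. A sketch with placeholder coefficient values (only the names match the diff):

    coeff_names = ["gamma", "delta", "a", "beta"]     # names from the diff above
    optimised_coeffs_at_date = [0.1, 0.2, 0.3, 0.4]   # placeholder values

    # dict() drains the lazy zip in a single pass; no intermediate list needed.
    coeffs = dict(zip(coeff_names, optimised_coeffs_at_date))
    assert coeffs["beta"] == 0.4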
8 changes: 4 additions & 4 deletions lib/improver/nbhood/nbhood.py
@@ -239,8 +239,8 @@ def process(self, cube, mask_cube=None):
# neighbourhood, and then apply the neighbourhood
# processing method to smooth the field.
for cube_slice, radius in (
list(zip(cube_realization.slices_over("time"),
required_radii))):
zip(cube_realization.slices_over("time"),
required_radii)):
cube_slice = self.neighbourhood_method.run(
cube_slice, radius, mask_cube=mask_cube)
cubes_time.append(cube_slice)
@@ -313,7 +313,7 @@ def __init__(
except KeyError:
msg = ("The neighbourhood_method requested: {} is not a "
"supported method. Please choose from: {}".format(
neighbourhood_method, list(methods.keys())))
neighbourhood_method, methods.keys()))
raise KeyError(msg)


@@ -385,5 +385,5 @@ def __init__(
except KeyError:
msg = ("The neighbourhood_method requested: {} is not a "
"supported method. Please choose from: {}".format(
neighbourhood_method, list(methods.keys())))
neighbourhood_method, methods.keys()))
raise KeyError(msg)
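
One visible side effect of dropping list() inside the two error messages above: str.format() renders a dict view as dict_keys([...]) rather than as a plain list, so the wording of the raised KeyError changes slightly. Illustrative only, with a placeholder mapping:

    methods = {"square": None, "circular": None}  # placeholder mapping
    print("Please choose from: {}".format(list(methods.keys())))
    # Please choose from: ['square', 'circular']
    print("Please choose from: {}".format(methods.keys()))
    # Please choose from: dict_keys(['square', 'circular'])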
@@ -496,8 +496,8 @@ def process(self, temperature, relative_humidity, pressure):
relative_humidity_over_levels = relative_humidity.slices_over(
level_coord)
pressure_over_levels = pressure.slices_over(level_coord)
slices = list(zip(temperature_over_levels,
relative_humidity_over_levels, pressure_over_levels))
slices = zip(temperature_over_levels,
relative_humidity_over_levels, pressure_over_levels)
elif len(vertical_coords) > 0 and len(set(vertical_coords)) != 1:
raise ValueError('WetBulbTemperature: Cubes have differing '
'vertical coordinates.')
2 changes: 1 addition & 1 deletion lib/improver/spotdata/ancillaries.py
@@ -75,7 +75,7 @@ def get_ancillary_data(diagnostics, ancillary_path):

# Check if the land mask is used for any diagnostics.
if any([(diagnostics[key]['neighbour_finding']['land_constraint'])
for key in list(diagnostics.keys())]):
for key in diagnostics]):
try:
land = load_cube(
ancillary_path + '/land_mask.nc',
2 changes: 1 addition & 1 deletion lib/improver/spotdata/common_functions.py
@@ -342,7 +342,7 @@ def extract_ad_at_time(additional_diagnostics, time, time_extract):
"""
ad_extracted = {}
for key in list(additional_diagnostics.keys()):
for key in additional_diagnostics:
cubes = additional_diagnostics[key]
ad_extracted[key] = extract_cube_at_time(cubes, time, time_extract)
return ad_extracted
6 changes: 3 additions & 3 deletions lib/improver/spotdata/extract_data.py
@@ -247,16 +247,16 @@ def make_cube(self, cube, data, sites):
# Build the new auxiliary coordinates.
crds = self._aux_coords_to_make()
aux_crds = []
for key, kwargs in zip(list(crds.keys()), iter(crds.values())):
for key, kwargs in zip(crds.keys(), crds.values()):
aux_data = np.array([entry[key] for entry in sites.values()])
crd = build_coordinate(aux_data, long_name=key, **kwargs)
aux_crds.append(crd)

# Construct zipped lists of coordinates and indices. New aux coords are
# associated with the index dimension.
n_dim_coords = len(dim_coords)
dim_coords = list(zip(dim_coords, list(range(n_dim_coords))))
aux_coords = list(zip(aux_crds, [n_dim_coords-1]*len(aux_crds)))
dim_coords = zip(dim_coords, range(n_dim_coords))
aux_coords = zip(aux_crds, [n_dim_coords-1]*len(aux_crds))

# Copy other cube metadata.
metadata_dict = copy.deepcopy(cube.metadata._asdict())
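
A caveat worth noting on the make_cube() change above: unlike the list the 2to3 wrapper produced, a zip object is exhausted after a single pass, so dim_coords = zip(...) is only safe if the result is consumed once, for example passed straight to the cube constructor. A short illustration with made-up coordinate names:

    pairs = zip(["projection_y", "projection_x"], [0, 1])  # placeholder names
    print(list(pairs))  # [('projection_y', 0), ('projection_x', 1)]
    print(list(pairs))  # [] -- the zip object is already exhausted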
11 changes: 5 additions & 6 deletions lib/improver/spotdata/main.py
@@ -149,11 +149,11 @@ def run_spotdata(diagnostics, ancillary_data, sites, config_constants,
ancillary_data=ancillary_data, **neighbour_kwargs)

# Set up site-grid point neighbour lists for all IGPS methods being used.
for key in list(diagnostics.keys()):
for key in diagnostics:
neighbour_finding = diagnostics[key]['neighbour_finding']
neighbour_hash = construct_neighbour_hash(neighbour_finding)
# Check if defined neighbour method results already exist.
if neighbour_hash not in list(neighbours.keys()):
if neighbour_hash not in neighbours:
# If not, find neighbours with new method.
neighbours[neighbour_hash] = (
PointSelection(**neighbour_finding).process(
@@ -168,14 +168,13 @@ def run_spotdata(diagnostics, ancillary_data, sites, config_constants,
# selected. Determine number of diagnostics to establish
# multiprocessing pool size.
n_diagnostic_threads = (
min(len(list(diagnostics.keys())), mp.cpu_count()))
min(len(diagnostics.keys()), mp.cpu_count()))

# Establish multiprocessing pool - each diagnostic processed on its
# own thread.
diagnostic_pool = mp.Pool(processes=n_diagnostic_threads)

diagnostic_keys = [
diagnostic_name for diagnostic_name in list(diagnostics.keys())]
diagnostic_keys = diagnostics.keys()

result = (
diagnostic_pool.map_async(
@@ -193,7 +192,7 @@ def run_spotdata(diagnostics, ancillary_data, sites, config_constants,
# Process diagnostics serially on one thread.
resulting_cubes = CubeList()
extrema_cubes = CubeList()
for key in list(diagnostics.keys()):
for key in diagnostics.keys():
resulting_cube, extrema_cubelist = (
process_diagnostic(
diagnostics, neighbours, sites,
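
The run_spotdata() change above hands diagnostics.keys() straight to the worker pool; multiprocessing's map_async accepts any sized iterable, so the old list comprehension added nothing. A minimal, self-contained sketch, assuming a hypothetical module-level worker (pools cannot pickle lambdas or local functions):

    import multiprocessing as mp

    def process_diagnostic(key):
        # Stand-in for the real per-diagnostic worker in main.py.
        return key.upper()

    if __name__ == "__main__":
        diagnostics = {"temperature": None, "rainfall": None}
        with mp.Pool(processes=2) as pool:
            result = pool.map_async(process_diagnostic, diagnostics.keys())
            print(result.get())  # ['TEMPERATURE', 'RAINFALL']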
2 changes: 1 addition & 1 deletion lib/improver/spotdata/neighbour_finding.py
@@ -122,7 +122,7 @@ def process(self, cube, sites, ancillary_data,
"""
if self.method == 'fast_nearest_neighbour':
if 'orography' in list(ancillary_data.keys()):
if 'orography' in ancillary_data:
orography = ancillary_data['orography'].data
else:
orography = None
2 changes: 1 addition & 1 deletion lib/improver/spotdata/read_input.py
@@ -63,7 +63,7 @@ def get_method_prerequisites(method, diagnostic_data_path):
}

additional_diagnostics = {}
if method in list(prereq.keys()):
if method in prereq.keys():
for item in prereq[method]:
additional_diagnostics[item] = get_additional_diagnostics(
item, diagnostic_data_path)
14 changes: 7 additions & 7 deletions lib/improver/spotdata/site_data.py
@@ -147,9 +147,9 @@ def parse_input(self, site_data):
"""
latitude_entries = [i_site for (i_site, site) in enumerate(site_data)
if 'latitude' in list(site.keys())]
if 'latitude' in site.keys()]
longitude_entries = [i_site for (i_site, site) in enumerate(site_data)
if 'longitude' in list(site.keys())]
if 'longitude' in site.keys()]

if not latitude_entries or not longitude_entries:
raise KeyError('longitude and latitude must be defined for '
@@ -177,21 +177,21 @@ def parse_input(self, site_data):
self.altitudes = np.full(n_sites, np.nan)
self.wmo_site = np.full(n_sites, 0, dtype=int)
for i_site, site in enumerate(site_data):
if ('altitude' in list(site.keys()) and
if ('altitude' in site and
site['altitude'] is not None):
self.altitudes[i_site] = site['altitude']
if 'wmo_id' in list(site.keys()) and site['wmo_id'] is not None:
if 'wmo_id' in site and site['wmo_id'] is not None:
self.wmo_site[i_site] = site['wmo_id']

# Identify UTC offset if it is provided in the input, otherwise set it
# based upon site longitude.
self.utc_offsets = np.full(n_sites, np.nan)
for i_site, site in enumerate(site_data):
if 'gmtoffset' in list(site.keys()):
if 'gmtoffset' in site.keys():
self.utc_offsets[i_site] = site['gmtoffset']
elif 'utcoffset' in list(site.keys()):
elif 'utcoffset' in site:
self.utc_offsets[i_site] = site['utcoffset']
elif 'utc_offset' in list(site.keys()):
elif 'utc_offset' in site:
self.utc_offsets[i_site] = site['utc_offset']

# If it's not been set, set it with the longitude based method.
18 changes: 9 additions & 9 deletions lib/improver/tests/argparser/test_ArgParser.py
@@ -83,7 +83,7 @@ def test_create_argparser_with_no_arguments(self):
compulsory_arguments):
parser = ArgParser(central_arguments=None, specific_arguments=None)
args = parser.parse_args()
args = list(vars(args).keys())
args = vars(args)
self.assertEqual(len(args), 0)

def test_create_argparser_only_compulsory_arguments(self):
Expand All @@ -99,7 +99,7 @@ def test_create_argparser_only_compulsory_arguments(self):
compulsory_arguments):
parser = ArgParser(central_arguments=None, specific_arguments=None)
args = parser.parse_args()
args = list(vars(args).keys())
args = vars(args)
self.assertCountEqual(args, ['foo'])

def test_create_argparser_fails_with_unknown_centralized_argument(self):
Expand Down Expand Up @@ -134,7 +134,7 @@ def test_create_argparser_only_centralized_arguments(self):
parser = ArgParser(central_arguments=['foo'],
specific_arguments=None)
args = parser.parse_args()
args = list(vars(args).keys())
args = vars(args)
self.assertCountEqual(args, ['foo'])

def test_create_argparser_only_specific_arguments(self):
@@ -152,7 +152,7 @@ def test_create_argparser_only_specific_arguments(self):
parser = ArgParser(central_arguments=None,
specific_arguments=specific_arguments)
args = parser.parse_args()
args = list(vars(args).keys())
args = vars(args)
self.assertCountEqual(args, ['foo'])

def test_create_argparser_compulsory_and_centralized_arguments(self):
@@ -171,7 +171,7 @@ def test_create_argparser_compulsory_and_centralized_arguments(self):
parser = ArgParser(central_arguments=['bar'],
specific_arguments=None)
args = parser.parse_args()
args = list(vars(args).keys())
args = vars(args)
self.assertCountEqual(args, ['foo', 'bar'])

def test_create_argparser_compulsory_and_specfic_arguments(self):
@@ -189,7 +189,7 @@ def test_create_argparser_compulsory_and_specfic_arguments(self):
parser = ArgParser(central_arguments=None,
specific_arguments=specific_arguments)
args = parser.parse_args()
args = list(vars(args).keys())
args = vars(args)
self.assertCountEqual(args, ['foo', 'bar'])

def test_create_argparser_all_arguments(self):
@@ -209,7 +209,7 @@ def test_create_argparser_all_arguments(self):
parser = ArgParser(central_arguments=['bar'],
specific_arguments=specific_arguments)
args = parser.parse_args()
args = list(vars(args).keys())
args = vars(args)
self.assertCountEqual(args, ['foo', 'bar', 'baz'])


@@ -236,7 +236,7 @@ def test_adding_multiple_arguments(self):

parser.add_arguments(args_to_add)
result_args = parser.parse_args()
result_args = list(vars(result_args).keys())
result_args = vars(result_args)
# we could also add compulsory arguments to expected_namespace_keys
# and then assertCountEqual (order is unimportant), but this
# is unnecessary - just use a loop:
@@ -257,7 +257,7 @@ def test_adding_argument_with_defined_kwargs_dict(self):

parser.add_arguments(args_to_add)
result_args = parser.parse_args()
result_args = list(vars(result_args).keys())
result_args = vars(result_args)
self.assertIn(expected_arg, result_args)

def test_adding_argument_with_defined_kwargs_dict_has_defualt(self):
@@ -429,7 +429,7 @@ def test_basic(self):
optimised_coeffs, coeff_names = result
self.assertIsInstance(optimised_coeffs, dict)
self.assertIsInstance(coeff_names, list)
for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertEqual(
len(optimised_coeffs[key]), len(coeff_names))

@@ -459,7 +459,7 @@ def test_coefficient_values_for_gaussian_distribution(self):
current_forecast, historic_forecasts, truth)
optimised_coeffs, coeff_names = result

for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertArrayAlmostEqual(optimised_coeffs[key], data)
self.assertListEqual(coeff_names, ["gamma", "delta", "a", "beta"])

@@ -488,7 +488,7 @@ def test_coefficient_values_for_truncated_gaussian_distribution(self):
current_forecast, historic_forecasts, truth)
optimised_coeffs, coeff_names = result

for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertArrayAlmostEqual(optimised_coeffs[key], data)
self.assertListEqual(coeff_names, ["gamma", "delta", "a", "beta"])

@@ -530,7 +530,7 @@ def test_coefficient_values_for_gaussian_distribution_members(self):
current_forecast, historic_forecasts, truth)
optimised_coeffs, coeff_names = result

for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertArrayAlmostEqual(optimised_coeffs[key], data)
self.assertListEqual(coeff_names, ["gamma", "delta", "a", "beta"])

@@ -571,7 +571,7 @@ def test_coefficient_values_for_truncated_gaussian_distribution_mem(self):
result = plugin.estimate_coefficients_for_ngr(
current_forecast, historic_forecasts, truth)
optimised_coeffs, coeff_names = result
for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertArrayAlmostEqual(optimised_coeffs[key], data)
self.assertListEqual(coeff_names, ["gamma", "delta", "a", "beta"])

@@ -623,7 +623,7 @@ def test_truth_unit_conversion(self):
current_forecast, historic_forecasts, truth)
optimised_coeffs = result[0]

for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertArrayAlmostEqual(optimised_coeffs[key], data)

@ManageWarnings(
@@ -652,7 +652,7 @@ def test_historic_forecast_unit_conversion(self):
current_forecast, historic_forecasts, truth)
optimised_coeffs = result[0]

for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertArrayAlmostEqual(optimised_coeffs[key], data)

@ManageWarnings(
@@ -681,7 +681,7 @@ def test_current_forecast_unit_conversion(self):
current_forecast, historic_forecasts, truth)
optimised_coeffs = result[0]

for key in list(optimised_coeffs.keys()):
for key in optimised_coeffs:
self.assertArrayAlmostEqual(optimised_coeffs[key], data)

def test_truth_data_is_none(self):
@@ -125,7 +125,7 @@ def set_up_cube(zero_point_indices=((0, 0, 7, 7),), num_time_points=1,

cube.add_dim_coord(
DimCoord(
list(range(num_realization_points)),
range(num_realization_points),
standard_name='realization'), 0)
tunit = Unit("hours since 1970-01-01 00:00:00", "gregorian")
time_points = [402192.5 + _ for _ in range(num_time_points)]
6 changes: 3 additions & 3 deletions lib/improver/tests/spotdata/spotdata/test_ancillaries.py
@@ -106,15 +106,15 @@ def test_return_type(self):
diagnostics = {}
result = Plugin(diagnostics, self.directory)
self.assertIsInstance(result, dict)
for item in list(result.values()):
for item in result.values():
self.assertIsInstance(item, Cube)

def test_read_orography(self):
"""Test reading an orography netcdf file."""

diagnostics = {}
result = Plugin(diagnostics, self.directory)
self.assertIn('orography', list(result.keys()))
self.assertIn('orography', result)
self.assertIsInstance(result['orography'], Cube)
self.assertArrayEqual(result['orography'].data, self.orography.data)

@@ -123,7 +123,7 @@ def test_read_land_mask(self):
land constraint condition."""

result = Plugin(self.diagnostics, self.directory)
self.assertIn('land_mask', list(result.keys()))
self.assertIn('land_mask', result)
self.assertIsInstance(result['land_mask'], Cube)
self.assertArrayEqual(result['land_mask'].data, self.land.data)

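
The membership tests rewritten throughout (for example 'orography' in result above) are also a quiet performance fix: key in d and key in d.keys() are both O(1) hash lookups in Python 3, whereas key in list(d.keys()) built a list and then scanned it linearly. A sketch with a placeholder site record:

    site = {"latitude": 51.5, "longitude": -0.1}  # placeholder site record
    "latitude" in site               # O(1) hash lookup
    "latitude" in site.keys()        # also O(1): keys() returns a view
    "latitude" in list(site.keys())  # O(n): builds a list, then scans it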