diff --git a/CHANGELOG.md b/CHANGELOG.md index b0a2180c8..f7f13befe 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # OMC3 Changelog +#### 2022-11-08 - v0.7.0 - _jdilly_ + +- Added: + - Tune error based on deviation of filtered BBQ data to the moving average + (over moving average window) + - Action error calculated from error on the spectral line + (which in turn is the same as NOISE) + #### 2022-11-01 - v0.6.6 - Bugfixes diff --git a/omc3/__init__.py b/omc3/__init__.py index a14a74061..a8b53987d 100644 --- a/omc3/__init__.py +++ b/omc3/__init__.py @@ -11,7 +11,7 @@ __title__ = "omc3" __description__ = "An accelerator physics tools package for the OMC team at CERN." __url__ = "https://github.com/pylhc/omc3" -__version__ = "0.6.6" +__version__ = "0.7.0" __author__ = "pylhc" __author_email__ = "pylhc@github.com" __license__ = "MIT" diff --git a/omc3/amplitude_detuning_analysis.py b/omc3/amplitude_detuning_analysis.py index c26a7bf11..506e08dbe 100644 --- a/omc3/amplitude_detuning_analysis.py +++ b/omc3/amplitude_detuning_analysis.py @@ -115,8 +115,6 @@ """ -import os -from collections import OrderedDict from datetime import timedelta from pathlib import Path from typing import List, Sequence, Tuple, Dict, Any, Union @@ -124,20 +122,20 @@ import numpy as np import tfs from generic_parser import DotDict -from generic_parser.entrypoint_parser import EntryPointParameters, entrypoint, save_options_to_config +from generic_parser.entrypoint_parser import EntryPointParameters, entrypoint from numpy.typing import ArrayLike from tfs.frame import TfsDataFrame -from omc3.definitions import formats from omc3.definitions.constants import PLANES from omc3.tune_analysis import fitting_tools, kick_file_modifiers, timber_extract -from omc3.tune_analysis.bbq_tools import OutlierFilterOpt, MinMaxFilterOpt +from omc3.tune_analysis.bbq_tools import OutlierFilterOpt, MinMaxFilterOpt, FilterOpts from omc3.tune_analysis.constants import ( get_bbq_col, get_bbq_out_name, get_kick_out_name, - get_mav_col, - get_timber_bbq_key, INPUT_KICK, INPUT_PREVIOUS, CORRECTED, + get_timber_bbq_key, + get_natq_err_col, + INPUT_KICK, INPUT_PREVIOUS, CORRECTED, ) from omc3.tune_analysis.kick_file_modifiers import ( read_timed_dataframe, @@ -266,7 +264,8 @@ def analyse_with_bbq_corrections(opt: DotDict) -> Tuple[TfsDataFrame, TfsDataFra opt, filter_opt = _check_analyse_opt(opt) kick_df, bbq_df = get_kick_and_bbq_df(kick=opt.kick, bbq_in=opt.bbq_in, - beam=opt.beam, filter_opt=filter_opt, output=opt.output) + beam=opt.beam, + filter_opt=filter_opt) kick_plane = opt.plane @@ -277,23 +276,34 @@ def analyse_with_bbq_corrections(opt: DotDict) -> Tuple[TfsDataFrame, TfsDataFra kick_df = double_action_analysis(kick_df, opt.detuning_order, corrected) if opt.output: - LOG.info(f"Writing kick data to file in directory '{opt.output.absolute()}'") - opt.output.mkdir(parents=True, exist_ok=True) - write_timed_dataframe(opt.output / get_kick_out_name(), kick_df) - + _write_dataframes(opt.output, kick_df, bbq_df) return kick_df, bbq_df def get_kick_and_bbq_df(kick: Union[Path, str], bbq_in: Union[Path, str], - beam: int = None, filter_opt = None, - output: Path = None + beam: int = None, + filter_opt: FilterOpts = None, ) -> Tuple[tfs.TfsDataFrame, tfs.TfsDataFrame]: """Load the input data.""" bbq_df = None if bbq_in is not None and bbq_in == INPUT_PREVIOUS: + # NOTE: this is not the same as the "previous BBQ data" option in the GUI. + # That one just uses the previous bbq_ampdet.tfs file (loaded in the "else" below). 
+            # The use case for the INPUT_PREVIOUS option here is
+            # that you can modify the kick_ampdet_xy file manually (e.g. removing kicks)
+            # and rerun the fitting on the new data,
+            # without having to redo the whole BBQ analysis (as the values are already in the file).
+            # Tips:
+            # - Remove full columns to get rid of the whole kick
+            # - Add NaNs into NATQ columns you want to ignore (in case you want to keep the other plane for this kick)
+            # - Add NaNs to the ERRNATQ columns if you want to plot the point (w/o error bars) but not use it for the fit
         LOG.debug("Getting data from previous ampdet kick file")
         kick_df = read_timed_dataframe(Path(kick) / get_kick_out_name())
         kick_df.headers = {k: v for k, v in kick_df.headers.items() if not k.startswith("ODR_")}
+
+        # redo the corrected columns, so you only need to add NaNs into the NATQ columns
+        LOG.debug("Adding corrected natural tunes and stdev to kick data")
+        kick_df = kick_file_modifiers.add_corrected_natural_tunes(kick_df)
     else:
         LOG.debug("Getting data from kick files")
         kick_df = read_two_kick_files_from_folder(kick)
@@ -307,15 +317,6 @@ def get_kick_and_bbq_df(kick: Union[Path, str], bbq_in: Union[Path, str],
         LOG.debug("Adding corrected natural tunes and stdev to kick data")
         kick_df = kick_file_modifiers.add_corrected_natural_tunes(kick_df)

-    if output:
-        LOG.info(f"Writing BBQ data to file in directory '{output.absolute()}'")
-        try:
-            window = filter_opt.window
-        except AttributeError:
-            window = filter_opt[0].window
-
-        x_interval = get_approx_bbq_interval(bbq_df, kick_df.index, window)
-        write_timed_dataframe(output / get_bbq_out_name(), bbq_df.loc[x_interval[0]: x_interval[1]])
     return kick_df, bbq_df


@@ -419,7 +420,7 @@ def get_approx_bbq_interval(
 # Private Functions ------------------------------------------------------------


-def _check_analyse_opt(opt: DotDict):
+def _check_analyse_opt(opt: DotDict) -> Tuple[DotDict, FilterOpts]:
     """Perform manual checks on opt-structure."""
     LOG.debug("Checking Options.")

@@ -542,6 +543,15 @@ def _get_ampdet_data_as_array(data: Dict[Any, AmpDetData], column: str) -> Array
     return np.vstack([getattr(d, column) for d in data.values()])


+def _write_dataframes(output: Path, kick_df: TfsDataFrame, bbq_df: TfsDataFrame):
+    LOG.info(f"Writing kick data to file in directory '{output.absolute()}'")
+    output.mkdir(parents=True, exist_ok=True)
+    write_timed_dataframe(output / get_kick_out_name(), kick_df)
+    if bbq_df is not None:
+        LOG.info(f"Writing BBQ data to file in directory '{output.absolute()}'")
+        write_timed_dataframe(output / get_bbq_out_name(), bbq_df)
+
+
 # Script Mode ##################################################################


diff --git a/omc3/harpy/handler.py b/omc3/harpy/handler.py
index ca39ee457..ffc54cd39 100644
--- a/omc3/harpy/handler.py
+++ b/omc3/harpy/handler.py
@@ -159,7 +159,7 @@ def _compute_headers(panda, date=None):
             pass
         else:
             headers[f"{prefix}Q{PLANE_TO_NUM[plane]}"] = np.mean(bpm_tunes)
-            headers[f"{prefix}Q{PLANE_TO_NUM[plane]}RMS"] = np.std(bpm_tunes)
+            headers[f"{prefix}Q{PLANE_TO_NUM[plane]}RMS"] = np.std(bpm_tunes)  # TODO: not really the RMS?
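
The TODO above is on point: `np.std` gives the spread of the BPM tunes around their mean, while a true RMS includes the mean itself. A minimal sketch of the difference (plain numpy, values invented):

    import numpy as np

    bpm_tunes = np.array([0.310, 0.312, 0.309])

    std = np.std(bpm_tunes)                # deviation around the mean
    rms = np.sqrt(np.mean(bpm_tunes**2))   # true root mean square, includes the mean

    # rms**2 == std**2 + np.mean(bpm_tunes)**2 (up to floating-point error),
    # so this header is a tune *spread*, not an RMS in the strict sense.
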
     if date:
         headers["TIME"] = date.strftime(formats.TIME)
     return headers
diff --git a/omc3/optics_measurements/constants.py b/omc3/optics_measurements/constants.py
index f94a5bd57..d5ca28c81 100644
--- a/omc3/optics_measurements/constants.py
+++ b/omc3/optics_measurements/constants.py
@@ -17,6 +17,7 @@
 ORBIT_NAME: str = "orbit_"
 KICK_NAME: str = "kick_"
 IP_NAME: str = "interaction_point_"
+CALIBRATION_FILE: str = "calibration_{plane}.out"

 # Column Names -----------------------------------------------------------------
 # Pre- and Suffixes
@@ -40,20 +41,28 @@
 DPP: str = "DPP"
 DPPAMP: str = "DPPAMP"
 AMPLITUDE: str = "AMP"
+NAT_AMPLITUDE: str = "NATAMP"
 PHASE: str = "PHASE"
 PHASE_ADV: str = "MU"
 F1001: str = "F1001"
 F1010: str = "F1010"
+NOISE: str = "NOISE"
+CLOSED_ORBIT: str = "CO"
 SECONDARY_AMPLITUDE_X: str = "AMP01_X"  # amplitude of secondary line in horizontal spectrum
 SECONDARY_AMPLITUDE_Y: str = "AMP10_Y"  # amplitude of secondary line in vertical spectrum
 SECONDARY_FREQUENCY_X: str = "PHASE01_X"  # frequency of secondary line in horizontal spectrum
 SECONDARY_FREQUENCY_Y: str = "PHASE10_Y"  # frequency of secondary line in vertical spectrum

+# Kick files
 TIME: str = "TIME"
 ACTION: str = "2J"
 SQRT_ACTION: str = "sqrt2J"

+# Calibration files
+CALIBRATION = "CALIBRATION"
+ERR_CALIBRATION = "ERROR_CALIBRATION"

 # Headers ----------------------------------------------------------------------
 RESCALE_FACTOR: str = "RescalingFactor"
+BPM_RESOLUTION: str = "BPMResolution"
diff --git a/omc3/optics_measurements/kick.py b/omc3/optics_measurements/kick.py
index 51156b250..f4503eed0 100644
--- a/omc3/optics_measurements/kick.py
+++ b/omc3/optics_measurements/kick.py
@@ -20,7 +20,8 @@
                                                  NAT_TUNE, PEAK2PEAK,
                                                  RES, RESCALE_FACTOR, RMS,
-                                                 SQRT_ACTION, TIME, TUNE, S)
+                                                 SQRT_ACTION, TIME, TUNE, S, NOISE, CLOSED_ORBIT)
+from omc3.utils.stats import weighted_mean, weighted_error


 def calculate(measure_input, input_files, scale, header_dict, plane):
@@ -37,7 +38,7 @@ def calculate(measure_input, input_files, scale, header_dict, plane):
         `TfsDataFrame` containing actions and their errors.
     """
     try:
-        kick_frame = _getkick(measure_input, input_files, plane)
+        kick_frame = _get_kick(measure_input, input_files, plane)
     except IndexError:  # occurs if either no x or no y files exist
         return pd.DataFrame
     kick_frame = _rescale_actions(kick_frame, scale, plane)
@@ -58,7 +59,7 @@ def _rescale_actions(df, scaling_factor, plane):
     return df


-def _getkick(measure_input, files, plane):
+def _get_kick(measure_input, files, plane):
     load_columns, calc_columns, column_types = _get_column_mapping(plane)
     kick_frame = pd.DataFrame(data=0.,
                               index=range(len(files[plane])),
@@ -71,22 +72,54 @@
             kick_frame.loc[i, col] = df[src]

         # calculate data from measurement
-        kick_frame.loc[i, calc_columns] = _gen_kick_calc(measure_input, df, plane)
+        kick_frame.loc[i, calc_columns] = _get_action(measure_input, df, plane)
     kick_frame = kick_frame.astype(column_types)
     return kick_frame


-def _gen_kick_calc(meas_input, lin, plane):
+def _get_action(meas_input, lin: pd.DataFrame, plane: str) -> np.ndarray:
     """
-    Takes either PK2PK/2 for kicker excitation or AMP for AC-dipole excitation
+    Calculates the action (2J and sqrt(2J)) and its errors from the BPM data in the lin-frame.
+    Takes either PK2PK/2 for kicker excitation or AMP for AC-dipole excitation:
+    for a single kick the amplitude of the oscillations falls off over the turns,
+    so the amplitude of the main line does not represent the initial kick,
+    whereas it stays constant for the driven excitation.
+    Reminder: A = sqrt(2J * beta).
+
+    TODO (jdilly 07.09.2022):
+        beta_phase instead of beta_model as stated below Eq. (11) in
+        PHYS. REV. ACCEL. BEAMS 23, 042801 (2020)
+
+    Returns:
+        sqrt(2J), error sqrt(2J), 2J, error 2J as a (1x4) array
     """
-    frame = pd.merge(_get_model_arc_betas(meas_input, plane), lin.loc[:, [f"{AMPLITUDE}{plane}", PEAK2PEAK]],
+    frame = pd.merge(_get_model_arc_betas(meas_input, plane), lin,
                      how='inner', left_index=True, right_index=True)
-    amps = (frame.loc[:, f"{AMPLITUDE}{plane}"].to_numpy() if meas_input.accelerator.excitation
-            else frame.loc[:, PEAK2PEAK].to_numpy() / 2.0)
-    meansqrt2j = amps / np.sqrt(frame.loc[:, f"{BETA}{plane}"].to_numpy())
-    mean2j = np.square(amps) / frame.loc[:, f"{BETA}{plane}"].to_numpy()
-    return np.array([np.mean(meansqrt2j), np.std(meansqrt2j), np.mean(mean2j), np.std(mean2j)])
+
+    if meas_input.accelerator.excitation:
+        amps = frame.loc[:, f"{AMPLITUDE}{plane}"].to_numpy()
+        err_amps = frame.loc[:, f"{ERR}{AMPLITUDE}{plane}"].to_numpy()
+    else:
+        amps = frame.loc[:, PEAK2PEAK].to_numpy() / 2.0
+        err_amps = frame.loc[:, f"{CLOSED_ORBIT}{RMS}"].to_numpy()
+
+    # sqrt(2J) ---
+    sqrt_beta = np.sqrt(frame.loc[:, f"{BETA}{plane}"].to_numpy())
+
+    actions_sqrt2j = amps / sqrt_beta
+    errors_sqrt2j = err_amps / sqrt_beta
+
+    mean_sqrt2j = weighted_mean(data=actions_sqrt2j, errors=errors_sqrt2j)
+    err_sqrt2j = weighted_error(data=actions_sqrt2j, errors=errors_sqrt2j)
+
+    # 2J ---
+    actions_2j = np.square(amps) / frame.loc[:, f"{BETA}{plane}"].to_numpy()
+    errors_2j = 2 * amps * err_amps / frame.loc[:, f"{BETA}{plane}"].to_numpy()
+
+    mean_2j = weighted_mean(data=actions_2j, errors=errors_2j)
+    err_2j = weighted_error(data=actions_2j, errors=errors_2j)
+
+    return np.array([mean_sqrt2j, err_sqrt2j, mean_2j, err_2j])


 def _get_model_arc_betas(measure_input, plane):
diff --git a/omc3/optics_measurements/measure_optics.py b/omc3/optics_measurements/measure_optics.py
index 2c5570db4..feedb7068 100644
--- a/omc3/optics_measurements/measure_optics.py
+++ b/omc3/optics_measurements/measure_optics.py
@@ -10,6 +10,7 @@
 import sys
 from collections import OrderedDict
 from copy import deepcopy
+from typing import Dict, List

 import numpy as np
 import pandas as pd
@@ -21,7 +22,9 @@
                                              chromatic, dispersion, dpp, iforest,
                                              interaction_point, kick, phase,
                                              rdt, tune, crdt, coupling)
-from omc3.optics_measurements.constants import (CHROM_BETA_NAME, ERR, EXT)
+from omc3.optics_measurements.constants import (
+    CHROM_BETA_NAME, ERR, EXT, AMPLITUDE, ERR_CALIBRATION, CALIBRATION, CALIBRATION_FILE, NAME, BPM_RESOLUTION
+)
 from omc3.utils import iotools, logging_tools

 LOGGER = logging_tools.get_logger(__name__, level_console=logging_tools.INFO)
@@ -47,7 +50,7 @@ def measure_optics(input_files, measure_input):
     invariants = {}
     phase_dict = {}
     for plane in PLANES:
-        phase_dict[plane], out_dfs = phase.calculate(measure_input, input_files, tune_dict, plane)
+        phase_dict[plane], out_dfs = phase.calculate(measure_input, input_files, tune_dict, plane)
         phase.write(out_dfs, [common_header]*4, measure_input.outputdir, plane)
         phase.write_special(measure_input, phase_dict[plane]['free'], tune_dict[plane]["QF"], plane)
     if measure_input.only_coupling:
@@ -150,7 +153,8 @@ def __init__(self, files_to_analyse,
optics_opt): def _repair_backwards_compatible_frame(df, plane): """ Multiplies unscaled amplitudes by 2 to get from complex amplitudes to the real ones. - This is for backwards compatibility with Drive. + This is for backwards compatibility with Drive, + i.e. harpy has this """ df[f"AMP{plane}"] = df.loc[:, f"AMP{plane}"].to_numpy() * 2 if f"NATAMP{plane}" in df.columns: @@ -220,16 +224,34 @@ def bpms(self, plane=None, dpp_value=None): indices[0] = indices[0].intersection(ind) return indices[0] - def calibrate(self, calibs): + def calibrate(self, calibs: Dict[str, pd.DataFrame]): + """ + Use calibration data to rescale amplitude and amplitude error. + + Args: + calibs (Dict): Plane-Dictionary with DataFrames of calibration data. + + """ if calibs is None: return + for plane in PLANES: + bpm_resolution = calibs[plane].headers.get(BPM_RESOLUTION, 1e-4) # TODO: Default of 0.1 mm is LHC specific for i in range(len(self[plane])): - data = pd.merge(self[plane][i].loc[:, ["AMP" + plane]], calibs[plane], how='left', - left_index=True, right_index=True).fillna( - value={"CALIBRATION": 1., "ERROR_CALIBRATION": 0.}) - self[plane][i][f"AMP{plane}"] = self[plane][i].loc[:, f"AMP{plane}"] * data.loc[:, "CALIBRATION"] - self[plane][i][f"{ERR}AMP{plane}"] = data.loc[:, "ERROR_CALIBRATION"] # TODO + # Merge all measurement BPMs into calibration data (only few BPMs), + # fill missing values with a scaling of 1 and estimated error of 0.1mm (emaclean estimate) + data = pd.merge(self[plane][i].loc[:, [f"{AMPLITUDE}{plane}"]], calibs[plane], + how='left', left_index=True, right_index=True).fillna( + value={CALIBRATION: 1.}) # ERR_CALIBRATION is relative, NaN filled with absolute value below + + # Scale amplitude with the calibration + self[plane][i][f"AMP{plane}"] = self[plane][i].loc[:, f"AMP{plane}"] * data.loc[:, CALIBRATION] + + # Sum Amplitude Error (absolute) and Calibration Error (relative) + self[plane][i][f"{ERR}{AMPLITUDE}{plane}"] = np.sqrt( + self[plane][i][f"{ERR}{AMPLITUDE}{plane}"]**2 + + ((self[plane][i][f"{AMPLITUDE}{plane}"] * data.loc[:, ERR_CALIBRATION]).fillna(bpm_resolution))**2 + ) @ staticmethod def get_columns(frame, column): @@ -268,7 +290,7 @@ def copy_calibration_files(outputdir, calibrationdir): return None calibs = {} for plane in PLANES: - cal_file = f"calibration_{plane.lower()}.out" + cal_file = CALIBRATION_FILE.format(plane=plane.lower()) iotools.copy_item(os.path.join(calibrationdir, cal_file), os.path.join(outputdir, cal_file)) - calibs[plane] = tfs.read(os.path.join(outputdir, cal_file)).set_index("NAME") + calibs[plane] = tfs.read(os.path.join(outputdir, cal_file)).set_index(NAME) return calibs diff --git a/omc3/scripts/betabeatsrc_output_converter.py b/omc3/scripts/betabeatsrc_output_converter.py index be729a6a5..54c1c2bd2 100644 --- a/omc3/scripts/betabeatsrc_output_converter.py +++ b/omc3/scripts/betabeatsrc_output_converter.py @@ -1,6 +1,6 @@ """ BetaBeat.src Output Converter -------------- +----------------------------- Script to convert most important output files produced by ``BetaBeat.src`` / ``GetLLM`` into the standard format used in ``omc3`` to allow straight forward comparison of the two. 
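
The error combination in `calibrate` above adds the absolute amplitude error and the relative calibration error in quadrature, falling back to the BPM resolution where no calibration error is available. A self-contained sketch of that logic, with invented values:

    import numpy as np
    import pandas as pd

    amp = pd.Series([1.00, 2.00, 1.50])            # amplitudes after calibration scaling
    err_amp = pd.Series([0.010, 0.020, 0.015])     # absolute amplitude errors (e.g. from NOISE)
    rel_cal_err = pd.Series([0.02, np.nan, 0.01])  # relative calibration errors, NaN where uncalibrated
    bpm_resolution = 1e-4                          # absolute fallback, as in the BPMResolution header

    # absolute amplitude error and (relative -> absolute) calibration error, summed in quadrature
    err_combined = np.sqrt(err_amp**2 + ((amp * rel_cal_err).fillna(bpm_resolution))**2)
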
diff --git a/omc3/tune_analysis/bbq_tools.py b/omc3/tune_analysis/bbq_tools.py index 9280111be..3ca1ff642 100644 --- a/omc3/tune_analysis/bbq_tools.py +++ b/omc3/tune_analysis/bbq_tools.py @@ -13,7 +13,7 @@ from omc3.utils import logging_tools from omc3.utils.outliers import get_filter_mask -from dataclasses import dataclass +from dataclasses import dataclass LOG = logging_tools.get_logger(__name__) @@ -52,7 +52,7 @@ class MinMaxFilterOpt: FilterOpts = Union[OutlierFilterOpt, Tuple[MinMaxFilterOpt, MinMaxFilterOpt]] -def get_moving_average(data_series, filter_opt: MinMaxFilterOpt): +def get_moving_average(data_series: pd.Series, filter_opt: MinMaxFilterOpt) -> Tuple[pd.Series, pd.Series, ArrayLike]: """ Get a moving average of the ``data_series`` over ``length`` entries. The data can be filtered beforehand. The values are shifted, so that the averaged value takes ceil((length-1)/2) @@ -63,7 +63,7 @@ def get_moving_average(data_series, filter_opt: MinMaxFilterOpt): filter_opt: Options for the filtering, see `:class:`omc3.tune_analysis.bbq_tools.MinMaxFilterOpt`. Returns: - A filtered and averaged `Series` and the mask used for filtering data. + A filtered and averaged `Series`, another `Series` with the error and the mask used for filtering data. """ LOG.debug(f"Cutting data and calculating moving average of length {filter_opt.window:d}.") @@ -83,16 +83,16 @@ def get_moving_average(data_series, filter_opt: MinMaxFilterOpt): cut_mask = min_mask | max_mask | data_series.isna() _is_almost_empty_mask(~cut_mask, filter_opt.window) - data_mav, std_mav = _get_interpolated_moving_average(data_series, cut_mask, filter_opt.window) + data_mav, err_mav = _get_interpolated_moving_average(data_series, cut_mask, filter_opt.window) if filter_opt.fine_window is not None: min_mask = data_series <= (data_mav - filter_opt.fine_cut) max_mask = data_series >= (data_mav + filter_opt.fine_cut) cut_mask = min_mask | max_mask | data_series.isna() _is_almost_empty_mask(~cut_mask, filter_opt.fine_window) - data_mav, std_mav = _get_interpolated_moving_average(data_series, cut_mask, filter_opt.fine_window) + data_mav, err_mav = _get_interpolated_moving_average(data_series, cut_mask, filter_opt.fine_window) - return data_mav, std_mav, ~cut_mask + return data_mav, err_mav, ~cut_mask def clean_outliers_moving_average(data_series: pd.Series, filter_opt: OutlierFilterOpt) -> Tuple[pd.Series, pd.Series, NDArray[bool]]: @@ -114,8 +114,8 @@ def clean_outliers_moving_average(data_series: pd.Series, filter_opt: OutlierFil mask[i:i+window] &= get_filter_mask(data_series[i:i+window], limit=limit, mask=init_mask[i:i+window]) _is_almost_empty_mask(mask, window) - data_mav, std_mav = _get_interpolated_moving_average(data_series, ~mask, window) - return data_mav, std_mav, mask + data_mav, err_mav = _get_interpolated_moving_average(data_series, ~mask, window) + return data_mav, err_mav, mask # Private methods ############################################################ @@ -151,18 +151,22 @@ def _get_interpolated_moving_average(data_series: pd.Series, clean_mask: Union[p shift = -int((length-1)/2) # Shift average to middle value - # calculate mean and std, fill NaNs at the ends + # calculate mean and fill NaNs at the ends data_mav = data.rolling(length).mean().shift(shift).fillna( method="bfill").fillna(method="ffill") - std_mav = data.rolling(length).std().shift(shift).fillna( - method="bfill").fillna(method="ffill") + + # calculate deviation to the moving average and fill NaNs at the ends + std_mav = 
np.sqrt(((data-data_mav)**2).rolling(length).mean().shift(shift).fillna( + method="bfill").fillna(method="ffill")) + err_mav = std_mav / np.sqrt(length) if is_datetime_index: # restore index from input series data_mav.index = data_series.index std_mav.index = data_series.index + err_mav.index = data_series.index - return data_mav, std_mav + return data_mav, err_mav def _is_almost_empty_mask(mask, av_length): diff --git a/omc3/tune_analysis/fitting_tools.py b/omc3/tune_analysis/fitting_tools.py index 889ca1725..5ddde93a7 100644 --- a/omc3/tune_analysis/fitting_tools.py +++ b/omc3/tune_analysis/fitting_tools.py @@ -7,7 +7,7 @@ """ from collections import namedtuple -from typing import Sequence, Dict, List +from typing import Sequence, Dict, List, Tuple import numpy as np import pandas as pd @@ -35,12 +35,14 @@ def poly_func(beta, x): def do_odr(x: pd.Series, y: pd.Series, xerr: pd.Series, yerr: pd.Series, order: int): """ Returns the odr fit. + Important Convention: - The beta-parameter in the ODR models go upwards with order, i.e. - | beta[0] = y-Axis offset - | beta[1] = slope - | beta[2] = quadratic term - | etc. + The beta-parameter in the ODR models go upwards with order, i.e. + + | beta[0] = y-Axis offset + | beta[1] = slope + | beta[2] = quadratic term + | etc. Args: x: `Series` of x data. @@ -58,6 +60,7 @@ def do_odr(x: pd.Series, y: pd.Series, xerr: pd.Series, yerr: pd.Series, order: LOG.debug(f"ODR fit input (from polynomial fit): {fit_np}") # Actual ODR --- + xerr, yerr = _check_exact_zero_errors(xerr, yerr) odr = ODR(data=RealData(x=x, y=y, sx=xerr, sy=yerr), model=Model(get_poly_fun(order)), beta0=fit_np.coef) @@ -157,6 +160,7 @@ def do_2d_kicks_odr(x: ArrayLike, y: ArrayLike, xerr: ArrayLike, yerr: ArrayLike LOG.info(f"\nDetuning estimate without errors (curve fit):\n{res_str}\n") # Actual ODR --- + xerr, yerr = _check_exact_zero_errors(xerr, yerr) odr = ODR(data=RealData(x=x, y=y, sx=xerr, sy=yerr), # model=Model(first_order_detuning_2d), model=Model(first_order_detuning_2d, fjacd=first_order_detuning_2d_jacobian), @@ -180,3 +184,30 @@ def _filter_nans(*args: ArrayLike) -> List[ArrayLike]: a = a[:, :, ~np.isnan(a).any(axis=0).any(axis=0)] return list(a) + +def _check_exact_zero_errors(xerr: ArrayLike, yerr: ArrayLike) -> Tuple[np.ndarray, np.ndarray]: + """Check if errors are exact zero and replace with minimum error, if so. + Done because ODR crashes on exact zero error-bars. + + Beware that the output will always be np.arrays, even if the input is pd.Series. + """ + def check_exact_zero_per_plane(err, plane): + if (err != 0).all(): # no problem + return err + + # best way to work with array and series? + minval = np.where(err == 0, np.inf, err).min() # assumes all values >=0 + + if np.isinf(minval): + raise ValueError(f"All errors are exactly zero in the {plane} plane." + f" ODR cannot be performed.") + LOG.warning(f"Exact zeros in {plane} errors found." + f" Replaced by {minval} (the minimum value) to be able to perform ODR.") + return np.where(err == 0, minval, err) + + xerr = check_exact_zero_per_plane(xerr, "horizontal") + yerr = check_exact_zero_per_plane(yerr, "vertical") + return xerr, yerr + + + diff --git a/omc3/tune_analysis/kick_file_modifiers.py b/omc3/tune_analysis/kick_file_modifiers.py index a08882555..89227f896 100644 --- a/omc3/tune_analysis/kick_file_modifiers.py +++ b/omc3/tune_analysis/kick_file_modifiers.py @@ -4,17 +4,14 @@ Functions to add data to or extract data from **kick_ac** files. 
""" -import os from pathlib import Path from typing import Tuple, Union, Sequence import numpy as np import pandas as pd -from numpy.typing import ArrayLike, NDArray -from scipy import odr import tfs +from scipy import odr from tfs import TfsDataFrame -from generic_parser.dict_parser import DotDict from omc3.definitions.constants import PLANES from omc3.optics_measurements.constants import KICK_NAME, TIME, ERR, NAT_TUNE, ACTION @@ -117,11 +114,10 @@ def _filter_bbq_outliers(bbq_df: tfs.TfsDataFrame, filter_opt: OutlierFilterOpt) bbq_df.headers[header_window] = filter_opt.window bbq_df.headers[header_limit] = filter_opt.limit for plane in PLANES: - bbq_mav, bbq_std, mask = bbq_tools.clean_outliers_moving_average(bbq_df[get_bbq_col(plane)], + bbq_mav, bbq_err, mask = bbq_tools.clean_outliers_moving_average(bbq_df[get_bbq_col(plane)], filter_opt=filter_opt) bbq_df[get_mav_col(plane)] = bbq_mav - # bbq_df[get_mav_err_col(plane)] = bbq_std # this is too large - bbq_df[get_mav_err_col(plane)] = 0. # TODO to be discussed with Ewen and Tobias (jdilly, 2022-05-23) + bbq_df[get_mav_err_col(plane)] = bbq_err # TODO to be discussed with Ewen and Tobias (jdilly, 2022-05-23) bbq_df[get_used_in_mav_col(plane)] = mask return bbq_df @@ -132,14 +128,13 @@ def _filter_bbq_cut(bbq_df: tfs.TfsDataFrame, filter_opts: Sequence[MinMaxFilter bbq_df.headers[get_fine_cut_header()] = filter_opts[0].fine_cut for idx, plane in enumerate(PLANES): - bbq_mav, bbq_std, mask = bbq_tools.get_moving_average(bbq_df[get_bbq_col(plane)], filter_opts[idx]) + bbq_mav, bbq_err, mask = bbq_tools.get_moving_average(bbq_df[get_bbq_col(plane)], filter_opts[idx]) bbq_df.headers[get_min_tune_header(plane)] = filter_opts[idx].min bbq_df.headers[get_max_tune_header(plane)] = filter_opts[idx].max bbq_df[get_mav_col(plane)] = bbq_mav - # bbq_df[get_mav_err_col(plane)] = bbq_std # this is too large - bbq_df[get_mav_err_col(plane)] = 0. 
# TODO to be discussed with Ewen and Tobias (jdilly, 2022-05-23) + bbq_df[get_mav_err_col(plane)] = bbq_err # TODO to be discussed with Ewen and Tobias (jdilly, 2022-05-23) bbq_df[get_used_in_mav_col(plane)] = mask return bbq_df @@ -331,10 +326,10 @@ def convert_bbs_kickdataframe(df: tfs.TfsDataFrame) -> tfs.TfsDataFrame: if COEFFICIENT.format(order=1) in key: df.headers[key] = df.headers[key] * 1e6 # inverse um to inverse m - # use uncorrected error columns for corrected data - if ERR in key: - new_key = key.replace(ERR, f"{ERR}{CORRECTED}") - df.headers[new_key] = df.headers[key] + # # use uncorrected error columns for corrected data + # if ERR in key: + # new_key = key.replace(ERR, f"{ERR}{CORRECTED}") + # df.headers[new_key] = df.headers[key] # add err headers for coefficient 0 (offset) for tune in PLANES: @@ -367,12 +362,12 @@ def _rename_old_header(key: str): key = key.replace(old, new) key = key.replace("OFFSET", COEFFICIENT.format(order=0)).replace("SLOPE", COEFFICIENT.format(order=1)) - # this was the last prefix in the old file, so replace this first + # CORR was the last prefix in the old file, so replace this first if "_CORR" in key: parts = key.split("_") - key = "_".join(parts[:-2] + [f"{CORRECTED}{parts[-2]}"]) + key = "_".join(parts[:2] + [f"{CORRECTED}{parts[2]}"] + parts[3:-1]) - # and then remove also this and add ERR + # and then remove also STD and add ERR if "_STD" in key: parts = key.split("_") key = "_".join(parts[:-2] + [f"{ERR}{parts[-2]}"]) diff --git a/omc3/utils/outliers.py b/omc3/utils/outliers.py index 49903f972..657d102eb 100644 --- a/omc3/utils/outliers.py +++ b/omc3/utils/outliers.py @@ -1,4 +1,4 @@ -r""" +""" Outliers -------- @@ -8,12 +8,45 @@ from numpy.typing import ArrayLike from scipy.stats import t +from omc3.utils import logging_tools + +LOGGER = logging_tools.get_logger(__name__) + def get_filter_mask(data: ArrayLike, x_data: ArrayLike = None, limit: float = 0.0, niter: int = 20, nsig: int = None, mask: ArrayLike = None) -> ArrayLike: - """ + r""" Filters the array of values which are meant to be constant or a linear function of the x-data array if that is provided, by checking how many sigmas they are deviating from the average. + + The outlier filtering function is utilized at multiple stages of the data analysis. + It removes data points in the tails of the measured distribution, + which are too populated due to the finite sample size, + assuming a normal distribution specified by measured mean and standard + deviation of the given data. + A data point, outside of a user-specified limit, + is removed if there is less than a 50% chance + that it is from the specified normal distribution. + + In particular: + The function gets an array :math:`y` of data of length :math:`n_y`, + which can be any scalar data and is currently used for e.g. tune data + measured per BPM or a time series from the BBQ. + In addition, a cleaning ``limit`` can be given, + inside which the data points are always kept. + Further an optional array :math:`x` (via the parameter ``x_data``) + can be given, in which case a linear fit :math:`y = ax + c` is attempted + to remove any slope on the data, before calculating momenta on the data. + An upper boundary in :math:`n_\sigma` is then determined from the + percent point function (ppf) of a Student's t-distribution, + given :math:`n_x` degrees of freedom at :math:`100\% - 50\% / n_y`. 
+ Iteratively, mean :math:`\left< y \right>` and standard deviation :math:`\sigma` + of the data is calculated and only data within :math:`n_\sigma \cdot \sigma`, + or the given cleaning limit, around :math:`\left< y \right>` + is kept for the next iteration. The loop stops either after 20 iterations, + when there is no more data outside the boundaries or + when there are less than three data points remaining. + Returns a filter mask for the original array (meaning ``True`` for elements that should be kept). Args: @@ -36,6 +69,8 @@ def get_filter_mask(data: ArrayLike, x_data: ArrayLike = None, limit: float = 0. and ``False`` for the ones that should be filtered. """ + LOGGER.debug("Creating Outlier-Filter mask.") + if x_data is not None: if not len(data) == len(x_data): raise ValueError("Datasets are not equally long.") @@ -49,40 +84,40 @@ def get_filter_mask(data: ArrayLike, x_data: ArrayLike = None, limit: float = 0. if nsig is None: nsig = _get_significance_cut_from_length(np.sum(mask)) - # To fulfill the condition for the first iteration: - prevlen = np.sum(mask) + 1 + # Set number of remaining points to check decrease in loop + n_previous = np.sum(mask) + 1 # initialization for the first check # Cleaning iteration for _ in range(niter): # check that number of points decreases and some are left - if not ((np.sum(mask) < prevlen) and - (np.sum(mask) > 2)): + n_current = np.sum(mask) + if not ((n_current < n_previous) and (n_current > 2)): break - prevlen = np.sum(mask) + n_previous = n_current # get the (remaining) data-points used to get moments from, # if x-data is given, fit and remove slope. if x_data is None: - y, y_orig = _get_data(mask, data) + y, y_orig = data[mask], data else: y, y_orig = _get_data_without_slope(mask, x_data, data) # keep only points within nsig sigma of the remaining data. avg, std = np.mean(y), np.std(y) mask = np.logical_and(mask, np.abs(y_orig - avg) < np.max([limit, nsig * std])) + else: + LOGGER.debug("Outlier Filter loop exceeds maximum number of iterations." + " Current filter-mask will be used.") return mask def _get_data_without_slope(mask, x, y): + """ Remove the slope on the data by performing a linear fit. """ m, b = np.polyfit(x[mask], y[mask], 1) return y[mask] - b - m * x[mask], y - b - m * x -def _get_data(mask, data): - return data[mask], data - - -# Set the sigma cut, that expects 1 value to be cut -# if it is sample of normal distribution def _get_significance_cut_from_length(length): + """ Set the sigma cut, that expects one value to be cut + if it is sample of normal distribution.""" return t.ppf(1 - 0.5 / length, length) diff --git a/omc3/utils/stats.py b/omc3/utils/stats.py index b7130f09d..f8eb0f551 100644 --- a/omc3/utils/stats.py +++ b/omc3/utils/stats.py @@ -14,7 +14,11 @@ import numpy as np from scipy.special import erf from scipy.stats import t + from omc3.definitions.constants import PI2, PI2I +from omc3.utils import logging_tools + +LOGGER = logging_tools.get_logger(__name__) CONFIDENCE_LEVEL = (1 + erf(1 / np.sqrt(2))) / 2 @@ -36,7 +40,6 @@ def circular_mean(data, period=PI2, errors=None, axis=None): Returns: Returns the weighted circular average along the specified axis. """ - phases = data * PI2I / period weights = weights_from_errors(errors, period=period) @@ -134,10 +137,19 @@ def _get_shape(orig_shape, axis): def weighted_error(data, errors=None, axis=None, t_value_corr=True): """ Computes error of weighted average along the specified axis. 
+    This is similar to calculating the standard deviation of the data,
+    but both the average from which the deviations are taken
+    and the averaging over the squared deviations
+    are weighted, with weights based on the given errors.
+
+    In addition, the weighted variance is unbiased by the factor
+    n / (n-1), where n is the :meth:`omc3.utils.stats.effective_sample_size`.
+    Additionally, a (Student) t-value correction can be performed (done by default),
+    which corrects the estimate for small data sets.

     Parameters:
         data: array-like
-            Contains the data to be averaged
+            Contains the data on which the weighted error on the average is calculated.
         errors: array-like, optional
             Contains errors associated with the values in data, it is used to calculate weights
         axis: int or tuple of ints, optional
@@ -150,8 +162,11 @@
     """
     weights = weights_from_errors(errors)
     weighted_average = np.average(data, axis=axis, weights=weights)
-    (sample_variance, sum_of_weights) = np.ma.average(np.square(np.abs(data - weighted_average.reshape(
-        _get_shape(data.shape, axis)))), weights=weights, axis=axis, returned=True)
+    weighted_average = weighted_average.reshape(_get_shape(data.shape, axis))
+    (sample_variance, sum_of_weights) = np.ma.average(
+        np.square(np.abs(data - weighted_average)),
+        weights=weights, axis=axis, returned=True
+    )
     if weights is not None:
         sample_variance = sample_variance + 1 / sum_of_weights
     error = np.nan_to_num(np.sqrt(sample_variance * unbias_variance(data, weights, axis=axis)))
@@ -202,18 +217,33 @@
     if errors is None:
         return None
     if np.any(np.isnan(errors)):
-        # LOGGER.warning("Nans found, weights are not used.")
+        LOGGER.warning("NaNs found, weights are not used.")
         return None
     if np.any(np.logical_not(errors)):
-        # LOGGER.warning("Zeros found, weights are not used.")
+        LOGGER.warning("Zeros found, weights are not used.")
         return None
     return 1 / np.square(errors * PI2 / period)


 def effective_sample_size(data, weights, axis=None):
-    """
-    Computes effective sample size of weighted data along specifies axis,
-    the minimum value returned is 2 to avoid non-reasonable error blow-up
+    r"""
+    Computes effective sample size of weighted data along specified axis,
+    the minimum value returned is 2 to avoid non-reasonable error blow-up.
+
+    It is calculated via Kish's approximate formula
+    from the (not necessarily normalized) weights :math:`w_i` (see wikipedia):
+
+    .. math::
+
+        n_\mathrm{eff} = \frac{\left(\sum_i w_i\right)^2}{\sum_i w_i^2}
+
+    What it represents:
+    "In most instances, weighting causes a decrease in the statistical significance of results.
+    The effective sample size is a measure of the precision of the survey
+    (e.g., even if you have a sample of 1,000 people, an effective sample size of 100 would indicate
+    that the weighted sample is no more robust than a well-executed un-weighted
+    simple random sample of 100 people)." -
+    https://wiki.q-researchsoftware.com/wiki/Weights,_Effective_Sample_Size_and_Design_Effects

     Parameters:
         data: array-like
@@ -233,8 +263,19 @@


 def unbias_variance(data, weights, axis=None):
-    """
-    Computes a correction factor to unbias variance of weighted average of data along specified axis
+    r"""
+    Computes a correction factor to unbias variance of weighted average of data along specified axis,
+    e.g. transform the standard deviation
+
+    ..
math:: + + \sigma^2 = \frac{1}{N} \sum_{i=1}^N (x_i - x_\mathrm{mean})^2 + + into an un-biased estimator + + .. math:: + + \sigma^2 = \frac{1}{N-1} \sum_{i=1}^N (x_i - x_\mathrm{mean})^2 Parameters: data: array-like @@ -255,16 +296,21 @@ def unbias_variance(data, weights, axis=None): def t_value_correction(sample_size): """ - Calculates the multiplicative correction factor to determine standard deviation of normally - distributed quantity from standard deviation of its finite-sized sample - the minimum allowed sample size is 2 to avoid non-reasonable error blow-up - for smaller sample sizes 2 is used instead + Calculates the multiplicative correction factor to determine standard deviation of + a normally distributed quantity from standard deviation of its finite-sized sample. + The minimum allowed sample size is 2 to avoid non-reasonable error blow-up + for smaller sample sizes 2 is used instead. + + Note (jdilly): In other words, this transforms the area of 1 sigma under + the given student t distribution to the 1 sigma area of a normal distribution + (this transformation is where the ``CONFIDENCE LEVEL`` comes in). + I hope that makes the intention more clear. Args: sample_size: array-like Returns: - multiplicative correction factor(s) of same shape as sample_size - can contain nans + multiplicative correction factor(s) of same shape as sample_size. + Can contain nans. """ return t.ppf(CONFIDENCE_LEVEL, np.where(sample_size > 2, sample_size, 2) - 1) diff --git a/tests/accuracy/twiss_to_lin.py b/tests/accuracy/twiss_to_lin.py index b4aa14fef..f4f93fa3e 100644 --- a/tests/accuracy/twiss_to_lin.py +++ b/tests/accuracy/twiss_to_lin.py @@ -72,13 +72,23 @@ def generate_lin_files(model, tune, nattune, motion='_d', dpp=0.0, beam_directio + dpp * model.loc[:, f"DMU{plane}{motion}"] + (noise_freq_domain / (2 * np.pi)) *np.random.randn(nbpms) + np.random.rand(), 1) * beam_direction lin[f"ERRMU{plane}"] = noise_freq_domain / (2 * np.pi) + + lin[f"ERRAMP{plane}"] = noise_freq_domain * np.random.randn(nbpms) + lin[f"AMP{plane}"] = np.sqrt(model.loc[:, f"BET{plane}{motion}"] * ACTION * (1 + dpp * np.sin(2 * np.pi * model.loc[:, f"PHI{plane}{motion}"]) - * model.loc[:, f"W{plane}{motion}"])) + noise_freq_domain * np.random.randn(nbpms) + * model.loc[:, f"W{plane}{motion}"])) + lin[f"ERRAMP{plane}"] + + lin[f"ERRAMP{plane}"] = np.abs(lin[f"ERRAMP{plane}"]) # * 2 divided by two removed? lin[f"NATMU{plane}"] = np.remainder(model.loc[:, f"MU{plane}{MOTION['free']}"] + (NAT_OVER_DRV * noise_freq_domain / (2 * np.pi)) * np.random.randn(nbpms) + np.random.rand(), 1) * beam_direction - lin[f"NATAMP{plane}"] = NAT_OVER_DRV * np.sqrt(ACTION * model.loc[:, f"BET{plane}{MOTION['free']}"]) + noise_freq_domain * np.random.randn(nbpms) + + lin[f"ERRNATAMP{plane}"] = noise_freq_domain * np.random.randn(nbpms) + + lin[f"NATAMP{plane}"] = NAT_OVER_DRV * np.sqrt(ACTION * model.loc[:, f"BET{plane}{MOTION['free']}"]) + lin[f"ERRNATAMP{plane}"] + + lin[f"ERRNATAMP{plane}"] = np.abs(lin[f"ERRNATAMP{plane}"]) # * 2 divided by two removed? 
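
The pattern introduced above for the new ERRAMP/ERRNATAMP columns first stores the drawn noise, adds it to the signal, and only afterwards takes the absolute value, so the error column holds the magnitude of exactly the perturbation that was applied. A condensed sketch of the same idea (names are illustrative, not the omc3 test code):

    import numpy as np

    rng = np.random.default_rng(seed=0)
    noise_level = 1e-3
    true_amp = np.ones(5)

    noise = noise_level * rng.standard_normal(5)
    measured_amp = true_amp + noise  # the simulated measurement is perturbed by the noise
    err_amp = np.abs(noise)          # the stored error is the magnitude of that same perturbation
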
lin[f"PHASE{COUPLING_INDICES[plane]}"] = np.remainder(model.loc[:, f"MU{OTHER[plane]}{motion}"] + dpp * model.loc[:, f"DMU{OTHER[plane]}{motion}"] + (COUPLING * noise_freq_domain / (2 * np.pi)) * np.random.randn(nbpms) + np.random.rand(), 1) * beam_direction diff --git a/tests/inputs/spec_test.sdds.linx b/tests/inputs/spec_test.sdds.linx index 323fdd392..e73e53c55 100644 --- a/tests/inputs/spec_test.sdds.linx +++ b/tests/inputs/spec_test.sdds.linx @@ -4,7 +4,7 @@ @ NATQ1 %le 0.269333646572 @ NATQ1RMS %le 0.0533662538369 @ filename %s "/user/slops/data/LHC_DATA/OP_DATA/Betabeat/2018-08-22/LHCB1/Measurements/Beam1@Turn@2018_08_22@18_47_58_442/Beam1@Turn@2018_08_22@18_47_58_442.sdds.linx.notcleaned" -* NAME S PK2PK CO CORMS BPM_RES NOISE AVG_NOISE TUNEX AMPX MUX NATTUNEX NATMUX NATAMPX AMP_21 PHASE_21 FREQ_21 AMP02 PHASE02 FREQ02 AMP_30 PHASE_30 FREQ_30 AMP20 PHASE20 FREQ20 AMP11 PHASE11 FREQ11 AMP2_2 PHASE2_2 FREQ2_2 AMP30 PHASE30 FREQ30 AMP1_2 PHASE1_2 FREQ1_2 AMP_1_1 PHASE_1_1 FREQ_1_1 AMP_13 PHASE_13 FREQ_13 AMP0_2 PHASE0_2 FREQ0_2 AMP_20 PHASE_20 FREQ_20 AMP01 PHASE01 FREQ01 AMP_1_2 PHASE_1_2 FREQ_1_2 AMP12 PHASE12 FREQ12 MUXSYNC NOISE_SCALED ERR_AMPX ERR_MUX ERR_NATAMPX ERR_NATMUX ERR_AMP_21 ERR_PHASE_21 ERR_AMP02 ERR_PHASE02 ERR_AMP_30 ERR_PHASE_30 ERR_AMP20 ERR_PHASE20 ERR_AMP11 ERR_PHASE11 ERR_AMP2_2 ERR_PHASE2_2 ERR_AMP30 ERR_PHASE30 ERR_AMP1_2 ERR_PHASE1_2 ERR_AMP_1_1 ERR_PHASE_1_1 ERR_AMP_13 ERR_PHASE_13 ERR_AMP0_2 ERR_PHASE0_2 ERR_AMP_20 ERR_PHASE_20 ERR_AMP01 ERR_PHASE01 ERR_AMP_1_2 ERR_PHASE_1_2 ERR_AMP12 ERR_PHASE12 +* NAME S PK2PK CO CORMS BPM_RES NOISE AVG_NOISE TUNEX AMPX MUX NATTUNEX NATMUX NATAMPX AMP_21 PHASE_21 FREQ_21 AMP02 PHASE02 FREQ02 AMP_30 PHASE_30 FREQ_30 AMP20 PHASE20 FREQ20 AMP11 PHASE11 FREQ11 AMP2_2 PHASE2_2 FREQ2_2 AMP30 PHASE30 FREQ30 AMP1_2 PHASE1_2 FREQ1_2 AMP_1_1 PHASE_1_1 FREQ_1_1 AMP_13 PHASE_13 FREQ_13 AMP0_2 PHASE0_2 FREQ0_2 AMP_20 PHASE_20 FREQ_20 AMP01 PHASE01 FREQ01 AMP_1_2 PHASE_1_2 FREQ_1_2 AMP12 PHASE12 FREQ12 MUXSYNC NOISE_SCALED ERRAMPX ERRMUX ERRNATAMPX ERRNATMUX ERRAMP_21 ERRPHASE_21 ERRAMP02 ERRPHASE02 ERRAMP_30 ERRPHASE_30 ERRAMP20 ERRPHASE20 ERRAMP11 ERRPHASE11 ERRAMP2_2 ERRPHASE2_2 ERRAMP30 ERRPHASE30 ERRAMP1_2 ERRPHASE1_2 ERRAMP_1_1 ERRPHASE_1_1 ERRAMP_13 ERRPHASE_13 ERRAMP0_2 ERRPHASE0_2 ERRAMP_20 ERRPHASE_20 ERRAMP01 ERRPHASE01 ERRAMP_1_2 ERRPHASE_1_2 ERRAMP12 ERRPHASE12 $ %s %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le BPM.10L3.B1 3138.184185 0.197210435344 0.189150559736 0.000455399374889 0.0997334730964 0.000122772720356 0.000122772720356 0.267929522431 0.0215761995381 -0.391888515967 0.275202001741 -0.425021586236 0.000324029441834 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0132682585115 -0.35628748255 0.695181885389 0 0 0 0 0 0 0.554484450304 0.355303753625 0.320929585499 0 0 0 0 0 0 -0.152135234351 0.00569019210908 0.000122772720356 0.000905622201303 0.000122772720356 0.0603028083217 0.00569019210908 0.3 0.00569019210908 0.3 0.00569019210908 0.3 0.00569019210908 0.3 0.00569019210908 0.3 0.00569019210908 0.3 0.00569019210908 0.3 0.00569019210908 0.3 0.00569019210908 0.3 0.00569069295677 0.0682547902212 0.00569019210908 0.3 0.00569019210908 0.3 0.00650638821923 0.00163326888753 
0.00569019210908 0.3 0.00569019210908 0.3 BPM.10L4.B1 6469.957401 0.0922046876129 0.109644919271 0.00018711086455 0.103914345103 0.00012791940796 0.00012791940796 0.267930410533 0.00819458330919 0.0564105924158 0.275959030143 -0.107693393329 0.000191862006451 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.565436665186 -0.177030995517 0.320928762509 0.0215787252708 -0.441976509319 0.0905036017824 0.0215813167721 0.441944330253 0.909496398216 0.296163874032 0.0156102394879 0.00012791940796 0.00248444677734 0.00012791940796 0.106112755051 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0156102394879 0.3 0.0179328880964 0.00439385510405 0.0156138734517 0.115134084436 0.0156138743244 0.115120259045 diff --git a/tests/inputs/spec_test.sdds.liny b/tests/inputs/spec_test.sdds.liny index 4ce3a122b..8c13bac68 100644 --- a/tests/inputs/spec_test.sdds.liny +++ b/tests/inputs/spec_test.sdds.liny @@ -4,7 +4,7 @@ @ NATQ2 %le 0.292828073369 @ NATQ2RMS %le 0.0671449388334 @ filename %s "/user/slops/data/LHC_DATA/OP_DATA/Betabeat/2018-08-22/LHCB1/Measurements/Beam1@Turn@2018_08_22@18_47_58_442/Beam1@Turn@2018_08_22@18_47_58_442.sdds.liny.notcleaned" -* NAME S PK2PK CO CORMS BPM_RES NOISE AVG_NOISE TUNEY AMPY MUY NATTUNEY NATMUY NATAMPY AMP_11 PHASE_11 FREQ_11 AMP11 PHASE11 FREQ11 AMP21 PHASE21 FREQ21 AMP10 PHASE10 FREQ10 AMP_12 PHASE_12 FREQ_12 AMP1_1 PHASE1_1 FREQ1_1 AMP0_3 PHASE0_3 FREQ0_3 AMP_13 PHASE_13 FREQ_13 AMP0_2 PHASE0_2 FREQ0_2 AMP_20 PHASE_20 FREQ_20 MUYSYNC NOISE_SCALED ERR_AMPY ERR_MUY ERR_NATAMPY ERR_NATMUY ERR_AMP_11 ERR_PHASE_11 ERR_AMP11 ERR_PHASE11 ERR_AMP21 ERR_PHASE21 ERR_AMP10 ERR_PHASE10 ERR_AMP_12 ERR_PHASE_12 ERR_AMP1_1 ERR_PHASE1_1 ERR_AMP0_3 ERR_PHASE0_3 ERR_AMP_13 ERR_PHASE_13 ERR_AMP0_2 ERR_PHASE0_2 ERR_AMP_20 ERR_PHASE_20 +* NAME S PK2PK CO CORMS BPM_RES NOISE AVG_NOISE TUNEY AMPY MUY NATTUNEY NATMUY NATAMPY AMP_11 PHASE_11 FREQ_11 AMP11 PHASE11 FREQ11 AMP21 PHASE21 FREQ21 AMP10 PHASE10 FREQ10 AMP_12 PHASE_12 FREQ_12 AMP1_1 PHASE1_1 FREQ1_1 AMP0_3 PHASE0_3 FREQ0_3 AMP_13 PHASE_13 FREQ_13 AMP0_2 PHASE0_2 FREQ0_2 AMP_20 PHASE_20 FREQ_20 MUYSYNC NOISE_SCALED ERRAMPY ERRMUY ERRNATAMPY ERRNATMUY ERRAMP_11 ERRPHASE_11 ERRAMP11 ERRPHASE11 ERRAMP21 ERRPHASE21 ERRAMP10 ERRPHASE10 ERRAMP_12 ERRPHASE_12 ERRAMP1_1 ERRPHASE1_1 ERRAMP0_3 ERRPHASE0_3 ERRAMP_13 ERRPHASE_13 ERRAMP0_2 ERRPHASE0_2 ERRAMP_20 ERRPHASE_20 $ %s %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le %le BPM.10L3.B1 3138.184185 0.0885082234868 -0.0057102951908 0.000220443037056 0.0940703834288 0.000115801410699 0.000115801410699 0.320929171609 0.0114567927814 -0.134511179388 0.303946206575 -0.255853683751 0.000254410201069 0 0 0 0 0 0 0.0135575905558 -0.155153810614 0.857052976781 0.304490436229 -0.409681073875 0.267931402832 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0.0123352661347 0.0669313599165 0.464138456361 0.316418952569 0.0101076638906 0.000115801410699 0.0016086846713 0.000115801410699 0.0724435060086 0.0101076638906 0.3 0.0101076638906 0.3 0.010108592784 0.11865564642 0.0105658424424 0.00528320262278 0.0101076638906 0.3 0.0101076638906 0.3 0.0101076638906 0.3 0.0101076638906 0.3 0.0101076638906 0.3 
0.0101084328463 0.130413454702 BPM.10L4.B1 6469.957401 0.213097151184 0.103513348168 0.000533540893743 0.0971313801946 0.000119569522731 0.000119569522731 0.320928768102 0.0282328016349 -0.108436136293 0.303920645296 -0.13828225394 0.000395384932409 0 0 0 0 0 0 0.0115454935322 0.493471395433 0.856905712728 0.307125568029 -0.374650610108 0.267930515664 0 0 0 0 0 0 0 0 0 0.0118095893331 -0.288574216089 0.694827050106 0 0 0 0 0 0 0.342493995664 0.00423512778778 0.000119569522731 0.000674041522051 0.000119569522731 0.048130515419 0.00423512778778 0.3 0.00423512778778 0.3 0.0042354100463 0.0583813520118 0.0044303690098 0.00219467733141 0.00423512778778 0.3 0.00423512778778 0.3 0.00423512778778 0.3 0.0042354231065 0.0570757799477 0.00423512778778 0.3 0.00423512778778 0.3
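
Underlying several of these changes (the action errors in kick.py and the new error columns in the test lin files) is the move from plain mean/std to error-weighted estimates from omc3.utils.stats. A minimal standalone sketch of that inverse-variance weighting, without the t-value correction that omc3 applies on top and with the unbiasing reduced to its core:

    import numpy as np

    def weighted_mean(data, errors):
        weights = 1 / errors**2  # inverse-variance weights
        return np.sum(weights * data) / np.sum(weights)

    def weighted_error(data, errors):
        weights = 1 / errors**2
        mean = np.sum(weights * data) / np.sum(weights)
        # weighted sample variance, plus the variance of the weighted mean itself
        variance = np.average((data - mean)**2, weights=weights) + 1 / np.sum(weights)
        n_eff = np.sum(weights)**2 / np.sum(weights**2)  # Kish effective sample size
        return np.sqrt(variance * n_eff / (n_eff - 1))   # unbias by n_eff / (n_eff - 1)

    data = np.array([1.00, 1.10, 0.90])
    errors = np.array([0.10, 0.20, 0.10])
    print(weighted_mean(data, errors), weighted_error(data, errors))
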