From cd3753b5c116cdf6c40cff1cc80228cc20414132 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Mon, 28 Oct 2024 12:52:23 -0700 Subject: [PATCH 01/13] add ceres-solver fitting --- CMakeLists.txt | 5 + Dockerfile | 13 +- cmake/FindCeres.cmake | 39 ++++ include/array_ops.h | 6 + include/fitting_ops.h | 137 ++++++++++++++ src/array_ops.cxx | 18 ++ src/fitting_ops.cxx | 413 ++++++++++++++++++++++++++++++++++++++++++ 7 files changed, 630 insertions(+), 1 deletion(-) create mode 100644 cmake/FindCeres.cmake create mode 100644 include/array_ops.h create mode 100644 include/fitting_ops.h create mode 100644 src/fitting_ops.cxx diff --git a/CMakeLists.txt b/CMakeLists.txt index 144e2506..35c39c2c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -26,6 +26,7 @@ find_package(PythonInterp 3) find_package(PythonLibs 3) find_package(FLAC) find_package(GSL) +find_package(Ceres) find_package(OpenMP) if(OPENMP_FOUND) @@ -67,6 +68,7 @@ add_library(so3g SHARED src/so_linterp.cxx src/exceptions.cxx src/array_ops.cxx + src/fitting_ops.cxx ) # We could disable the lib prefix on the output library... but let's not. @@ -87,6 +89,9 @@ target_link_libraries(so3g spt3g::core) # Link GSL target_include_directories(so3g PRIVATE ${GSL_INCLUDE_DIR}) target_link_libraries(so3g ${GSL_LIBRARIES}) +# Link Ceres +target_include_directories(so3g PRIVATE ${CERES_INCLUDE_DIRS}) +target_link_libraries(so3g ${CERES_LIBRARIES} ${CERES_DEPENDENCIES}) # You probably want to select openblas, so pass -DBLA_VENDOR=OpenBLAS find_package(BLAS REQUIRED) diff --git a/Dockerfile b/Dockerfile index 49abf5ad..a74de187 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,11 +14,22 @@ RUN apt update && apt install -y \ gfortran \ libopenblas-dev \ libbz2-dev \ - python-is-python3 + python-is-python3 \ + libgoogle-glog-dev \ + libgflags-dev \ + libeigen3-dev \ # Set the working directory WORKDIR /app_lib/so3g +# Fetch and install ceres-solver +RUN git clone https://ceres-solver.googlesource.com/ceres-solver +WORKDIR /app_lib/so3g/ceres-solver +RUN mkdir build && cd build && cmake .. && make && make install + +# Set the working directory back to so3g +WORKDIR /app_lib/so3g + # Copy the current directory contents into the container ADD . 
/app_lib/so3g diff --git a/cmake/FindCeres.cmake b/cmake/FindCeres.cmake new file mode 100644 index 00000000..10a06ccc --- /dev/null +++ b/cmake/FindCeres.cmake @@ -0,0 +1,39 @@ +# Try to find the Ceres Solver library + +# Look for the Ceres package +find_path(CERES_INCLUDE_DIR ceres/ceres.h + PATH_SUFFIXES ceres +) + +find_library(CERES_LIBRARY ceres) + +# Check if Eigen is needed and available +find_package(Eigen3 REQUIRED) + +# Check if Ceres is found +if (CERES_INCLUDE_DIR AND CERES_LIBRARY AND TARGET Eigen3::Eigen) + # Ceres and Eigen were found + set(CERES_FOUND TRUE) + set(CERES_LIBRARIES ${CERES_LIBRARY}) + set(CERES_INCLUDE_DIRS ${CERES_INCLUDE_DIR} ${EIGEN3_INCLUDE_DIRS}) + + # Optionally, find other dependencies (like gflags and glog, which Ceres uses) + find_package(Glog REQUIRED) + find_package(Gflags REQUIRED) + + set(CERES_DEPENDENCIES ${GLOG_LIBRARIES} ${GFLAGS_LIBRARIES} Eigen3::Eigen) +else() + # Ceres or Eigen was not found + set(CERES_FOUND FALSE) +endif() + +# Set the results so they can be used by the project +mark_as_advanced(CERES_INCLUDE_DIR CERES_LIBRARY) + +# Provide an interface for usage +if (CERES_FOUND) + message(STATUS "Found Ceres: ${CERES_INCLUDE_DIR}") + message(STATUS "Found Eigen3: ${EIGEN3_INCLUDE_DIR}") +else() + message(WARNING "Could not find Ceres Solver or Eigen3") +endif() \ No newline at end of file diff --git a/include/array_ops.h b/include/array_ops.h new file mode 100644 index 00000000..366dc810 --- /dev/null +++ b/include/array_ops.h @@ -0,0 +1,6 @@ +#pragma once + +int get_dtype(const bp::object &); + +template +T _calculate_median(const T*, const int); \ No newline at end of file diff --git a/include/fitting_ops.h b/include/fitting_ops.h new file mode 100644 index 00000000..821dfe08 --- /dev/null +++ b/include/fitting_ops.h @@ -0,0 +1,137 @@ +#pragma once + +#include + +template +struct PolynomialModel +{ + // Ceres requires number of params + // to be known at compile time + static constexpr int nparams = Degree + 1; + + template + static T eval(T x, const T* params) + { + const T p0 = params[0]; + T result = p0; + for (int i = 1; i < nparams; ++i) { + const T p = params[i]; + result += p * ceres::pow(x, T(i)); + } + + return result; + } + // Not needed for least squares as ceres + // supports boundaries + template + static bool check_bounds(const T* params) + { + return true; + } +}; + +struct NoiseModel +{ + // Ceres requires number of params + // to be known at compile time + static constexpr int nparams = 3; + + template + static T eval(T f, const T* params) + { + const T fknee = params[0]; + const T w = params[1]; + const T alpha = params[2]; + + return w * (1.0 + ceres::pow(fknee / f, alpha)); + } + + // Slightly hacky way of bounds checking but is + // suggested by Ceres to ensure it never goes + // out of bounds + template + static bool check_bounds(const T* params) + { + const T w = params[1]; + if (w <= 0.0) { + return false; + } + return true; + } +}; + +// Model independent cost function for least-squares fitting +template +struct CostFunction +{ + using model = Model; + + CostFunction(int n, const double* x_data, const double* y_data) + : n_pts(n), x(x_data), y(y_data) {} + + template + bool operator()(const T* const params, T* residual) const { + for (int i = 0; i < n_pts; ++i) { + T model = Model::eval(T(x[i]), params); + residual[i] = T(y[i]) - model; + } + return true; + } + + static ceres::Problem create(const int n, const double* xx, const double* yy, double* p) + { + ceres::Problem problem; + + 
problem.AddResidualBlock( + new ceres::AutoDiffCostFunction, + ceres::DYNAMIC, Model::nparams>( + new CostFunction(n, xx, yy), n), nullptr, p); + + return problem; + } + +private: + const int n_pts; + const double* x; + const double* y; +}; + +// Model independent Negative Log Likelihood for generalized +// unconstrained minimization +template +struct NegLogLikelihood +{ + using model = Model; + + NegLogLikelihood(int n, const double* x_data, const double* y_data) + : n_pts(n), x(x_data), y(y_data) {} + + template + bool operator()(const T* const params, T* cost) const + { + // Check bounds (saves a lot of time) + if (!model::check_bounds(params)) { + return false; + } + + cost[0] = T(0.); + for (int i = 0; i < n_pts; ++i) { + T model = Model::eval(T(x[i]), params); + cost[0] += ceres::log(model) + T(y[i]) / model; + } + + return true; + } + + static ceres::FirstOrderFunction* create(int n, const double* xx, const double* yy) + { + // Ceres takes ownership of pointers so no cleanup is required + return new ceres::AutoDiffFirstOrderFunction, + Model::nparams>(new NegLogLikelihood(n, xx, yy)); + } + +private: + const int n_pts; + const double* x; + const double* y; +}; diff --git a/src/array_ops.cxx b/src/array_ops.cxx index d6dec7fd..b7ea268b 100644 --- a/src/array_ops.cxx +++ b/src/array_ops.cxx @@ -21,6 +21,7 @@ extern "C" { #include "so3g_numpy.h" #include "numpy_assist.h" #include "Ranges.h" +#include "array_ops.h" // TODO: Generalize to double precision too. // This implements Jon's noise model for ACT. It takes in @@ -993,6 +994,23 @@ void interp1d_linear(const bp::object & x, const bp::object & y, } } +template +T _calculate_median(const T* data, const int n) +{ + // Copy to prevent overwriting input with gsl median + // Explicitly cast to double here due to gsl + std::vector data_copy(n); + std::transform(data, data + n, data_copy.begin(), [](double val) { + return static_cast(val); + }); + + // GSL is much faster than a naive std::sort implementation + return gsl_stats_median(data_copy.data(), 1, n); +} + +template double _calculate_median(const double* arr, int size); +template float _calculate_median(const float* arr, int size); + PYBINDINGS("so3g") { diff --git a/src/fitting_ops.cxx b/src/fitting_ops.cxx new file mode 100644 index 00000000..add4bb03 --- /dev/null +++ b/src/fitting_ops.cxx @@ -0,0 +1,413 @@ +#define NO_IMPORT_ARRAY + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +#include +#include "so3g_numpy.h" +#include "numpy_assist.h" +#include "Ranges.h" +#include "array_ops.h" +#include "fitting_ops.h" + +template +void _least_squares(const double* x, const double* y, double* p, const int n, + const int nthreads=1, double* c=nullptr) +{ + // Set up ceres problem + ceres::Problem problem = CostFunction::create(n, x, y, p); + + ceres::Solver::Options options; + options.linear_solver_type = ceres::LinearSolverType::DENSE_QR; + options.logging_type = ceres::LoggingType::SILENT; + options.minimizer_progress_to_stdout = false; + options.num_threads = nthreads; + + ceres::Solver::Summary summary; + ceres::Solve(options, &problem, &summary); + + if (summary.IsSolutionUsable()) { + if (c) { + ceres::Covariance::Options covariance_options; + covariance_options.sparse_linear_algebra_library_type = + ceres::SparseLinearAlgebraLibraryType::EIGEN_SPARSE; + covariance_options.algorithm_type = + ceres::CovarianceAlgorithmType::DENSE_SVD; + covariance_options.null_space_rank = -1; + + ceres::Covariance covariance(covariance_options); + 
std::vector> covariance_blocks; + covariance_blocks.emplace_back(p, p); + if (covariance.Compute(covariance_blocks, &problem)) { + covariance.GetCovarianceBlock(p, p, c); + } + else { + for (int i = 0; i < CostFunction::model::nparams; ++i) { + p[i] = std::numeric_limits::quiet_NaN(); + c[i] = std::numeric_limits::quiet_NaN(); + } + } + } + } + else { + for (int i = 0; i < CostFunction::model::nparams; ++i) { + p[i] = std::numeric_limits::quiet_NaN(); + c[i] = std::numeric_limits::quiet_NaN(); + } + } +} + +template +bool _invert_matrix(const T* matrix, T* inverse, const int n) { + // Calculate inverse with Gaussian elimination (faster than GSL + // for small numbers of parameters) + std::unique_ptr aug_matrix = std::make_unique(n * 2 * n); + + for (int i = 0; i < n; ++i) { + for (int j = 0; j < n; ++j) { + aug_matrix[i * 2 * n + j] = matrix[i * n + j]; + } + for (int j = 0; j < n; ++j) { + aug_matrix[i * 2 * n + (n + j)] = (i == j) ? 1.0 : 0.0; + } + } + + for (int i = 0; i < n; ++i) { + T pivot = aug_matrix[i * 2 * n + i]; + if (pivot == 0.0) { + return false; + } + + for (int j = 0; j < 2 * n; ++j) { + aug_matrix[i * 2 * n + j] /= pivot; + } + + for (int k = 0; k < n; ++k) { + if (k == i) continue; + T factor = aug_matrix[k * 2 * n + i]; + for (int j = 0; j < 2 * n; ++j) { + aug_matrix[k * 2 * n + j] -= factor * aug_matrix[i * 2 * n + j]; + } + } + } + + for (int i = 0; i < n; ++i) { + for (int j = 0; j < n; ++j) { + inverse[i * n + j] = aug_matrix[i * 2 * n + (n + j)]; + } + } + + return true; +} + +// Compute the Hessian matrix using finite differences +template +void _compute_hessian(ceres::GradientProblem& problem, double* p, + double* hessian, const double epsilon=1e-5) { + + double gradient[Likelihood::model::nparams]; + double perturbed_parameters[Likelihood::model::nparams]; + double perturbed_gradient[Likelihood::model::nparams]; + double cost; + + problem.Evaluate(p, &cost, gradient); + + for (int i = 0; i < Likelihood::model::nparams; ++i) { + std::copy(p, p + Likelihood::model::nparams, perturbed_parameters); + perturbed_parameters[i] += epsilon; + problem.Evaluate(perturbed_parameters, &cost, perturbed_gradient); + + // Second derivative + for (int j = 0; j < Likelihood::model::nparams; ++j) { + hessian[i * Likelihood::model::nparams + j] = + (perturbed_gradient[j] - gradient[j]) / epsilon; + } + } +} + +// General minimization function +template +void _minimize(const double* x, const double* y, double* p, const int n, + const double tol, const int niter, double* c=nullptr, + const double epsilon=1e-5) +{ + // Create problem + ceres::GradientProblem problem(Likelihood::create(n, x, y)); + + // Options for solving + ceres::GradientProblemSolver::Options options; + options.logging_type = ceres::LoggingType::SILENT; + options.minimizer_progress_to_stdout = false; + options.line_search_direction_type = ceres::BFGS; + options.max_num_iterations = niter; + options.function_tolerance = tol; + + ceres::GradientProblemSolver::Summary summary; + + // Run the solver + ceres::Solve(options, problem, p, &summary); + + // Calculate uncertainties. Ceres does not include a way to calculate + // uncertainties for gradient problems but this seems fast enough. 
+ if (summary.IsSolutionUsable()) { + if (c) { + double hessian[Likelihood::model::nparams * Likelihood::model::nparams]; + double inv_hessian[Likelihood::model::nparams * Likelihood::model::nparams]; + + _compute_hessian(problem, p, hessian, epsilon); + bool not_singular = _invert_matrix(hessian, inv_hessian, + Likelihood::model::nparams); + + if (not_singular) { + for (int i = 0; i < Likelihood::model::nparams; ++i) { + c[i] = std::sqrt(inv_hessian[i * Likelihood::model::nparams + i]); + } + } + else { + for (int i = 0; i < Likelihood::model::nparams; ++i) { + c[i] = std::numeric_limits::quiet_NaN() ; + } + } + } + } + else { + for (int i = 0; i < Likelihood::model::nparams; ++i) { + p[i] = std::numeric_limits::quiet_NaN() ; + c[i] = std::numeric_limits::quiet_NaN() ; + } + } +} + +auto _get_frequency_limits(const double* f, const double lowf, + const double fwhite_lower, + const double fwhite_upper, + const int nsamps) +{ + // Index for f = lowf + int lowf_i = 0; + for (int i = 0; i < nsamps; ++i) { + if (f[i] > lowf) { + lowf_i = i; + break; + } + } + + std::vector fwhite_i; + + // index for f > lower fwhite + for (int i = 0; i < nsamps; ++i) { + if (f[i] > fwhite_lower) { + fwhite_i.push_back(i); + break; + } + } + + // index for f < upper fwhite + for (int i = nsamps - 1; i >= 0; --i) { + if (f[i] < fwhite_upper) { + fwhite_i.push_back(i); + break; + } + } + + int fwhite_size = fwhite_i[1] - fwhite_i[0] + 1; + + return std::make_tuple(lowf_i, fwhite_i, fwhite_size); +} + +template +void _fit_noise(const double* f, const double* log_f, const double* pxx, + T* p, T* c, const int ndets, const int nsamps, + const int lowf_i, const std::vector fwhite_i, + const int fwhite_size, const double tol, const int niter, + const double epsilon) +{ + double log_pxx[nsamps]; + for (int i = 0; i < nsamps; ++i) { + log_pxx[i] = std::log10(pxx[i]); + } + + // Estimate of white noise + double wnest = _calculate_median(pxx + fwhite_i[0], fwhite_size); + + // Fit 1/f to line in logspace + double pfit[2] = {1., 1.}; + _least_squares(log_f, log_pxx, pfit, lowf_i + 1, 1); + + // Find guess for fknee + int fi = 0; + double min_f = std::numeric_limits::max(); + for (int i = 0; i < nsamps; ++i) { + double model_val = CostFunc::model::eval(log_f[i], pfit); + double diff = std::abs(std::pow(10., model_val) - wnest); + if (diff <= min_f) { + min_f = diff; + fi = i; + } + } + + // Initial parameters + double p0[Likelihood::model::nparams] = {f[fi], wnest, -pfit[1]}; + double c0[Likelihood::model::nparams]; + + // Minimize + _minimize(f, pxx, p0, nsamps, tol, niter, c0, epsilon); + + // Implict cast from double to T + for (int i = 0; i < Likelihood::model::nparams; ++i) { + p[i] = p0[i]; + c[i] = c0[i]; + } +} + +template +void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, + bp::object & p, bp::object & c, const double lowf, + const double fwhite_lower, const double fwhite_upper, + const double tol, const int niter, const double epsilon) +{ + using Likelihood = NegLogLikelihood; + using CostFunc = CostFunction>; + + BufferWrapper pxx_buf ("pxx", pxx, false, std::vector{-1, -1}); + if (pxx_buf->strides[1] != pxx_buf->itemsize) + throw ValueError_exception("Argument 'pxx' must be contiguous in last axis."); + T* pxx_data = (T*)pxx_buf->buf; + const int ndets = pxx_buf->shape[0]; + const int nsamps = pxx_buf->shape[1]; + const int row_stride = pxx_buf->strides[0] / sizeof(T); + + BufferWrapper f_buf ("f", f, false, std::vector{nsamps}); + if (f_buf->strides[0] != f_buf->itemsize) + throw 
ValueError_exception("Argument 'f' must be a C-contiguous 1d array."); + T* f_data = (T*)f_buf->buf; + + BufferWrapper p_buf ("p", p, false, std::vector{ndets, Likelihood::model::nparams}); + if (p_buf->strides[1] != p_buf->itemsize) + throw ValueError_exception("Argument 'p' must be contiguous in last axis."); + T* p_data = (T*)p_buf->buf; + const int p_stride = p_buf->strides[0] / sizeof(T); + + BufferWrapper c_buf ("c", c, false, std::vector{ndets, Likelihood::model::nparams}); + if (c_buf->strides[1] != c_buf->itemsize) + throw ValueError_exception("Argument 'c' must be contiguous in last axis."); + T* c_data = (T*)c_buf->buf; + const int c_stride = c_buf->strides[0] / sizeof(T); + + if constexpr (std::is_same::value) { + // Get frequency bounds and log(f) + auto [lowf_i, fwhite_i, fwhite_size] = + _get_frequency_limits(f_data, lowf, fwhite_lower, fwhite_upper, nsamps); + + // Fit in logspace + double log_f[nsamps]; + for (int i = 0; i < nsamps; ++i) { + log_f[i] = std::log10(f_data[i]); + } + + #pragma omp parallel for + for (int i = 0; i < ndets; ++i) { + int ioff = i * row_stride; + int poff = i * p_stride; + int coff = i * c_stride; + + double* pxx_det = pxx_data + ioff; + T* p_det = p_data + poff; + T* c_det = c_data + coff; + + _fit_noise(f_data, log_f, pxx_det, p_det, c_det, + ndets, nsamps, lowf_i, fwhite_i, fwhite_size, + tol, niter, epsilon); + } + } + else if constexpr (std::is_same::value) { + // Copy f to double + double f_double[nsamps]; + + std::transform(f_data, f_data + nsamps, f_double, + [](float value) { return static_cast(value); }); + + // Get frequency bounds and log(f) + auto [lowf_i, fwhite_i, fwhite_size] = + _get_frequency_limits(f_double, lowf, fwhite_lower, fwhite_upper, nsamps); + + // Fit in logspace + double log_f[nsamps]; + for (int i = 0; i < nsamps; ++i) { + log_f[i] = std::log10(f_double[i]); + } + + #pragma omp parallel for + for (int i = 0; i < ndets; ++i) { + int ioff = i * row_stride; + int poff = i * p_stride; + int coff = i * c_stride; + + // Copy pxx row to double + double pxx_det[nsamps]; + + std::transform(pxx_data + ioff, pxx_data + ioff + nsamps, pxx_det, + [](float value) { return static_cast(value); }); + + // Cast implicitly on assignment + T* p_det = p_data + poff; + T* c_det = c_data + coff; + + _fit_noise(f_double, log_f, pxx_det, p_det, + c_det, ndets, nsamps, lowf_i, fwhite_i, + fwhite_size, tol, niter, epsilon); + } + } +} + +void fit_noise(const bp::object & f, const bp::object & pxx, bp::object & p, bp::object & c, + const double lowf, const double fwhite_lower, const double fwhite_upper, + const double tol, const int niter, const double epsilon) +{ + // Get data type + int dtype = get_dtype(pxx); + + if (dtype == NPY_FLOAT) { + _fit_noise_buffer(f, pxx, p, c, lowf, fwhite_lower, fwhite_upper, tol, niter, epsilon); + } + else if (dtype == NPY_DOUBLE) { + _fit_noise_buffer(f, pxx, p, c, lowf, fwhite_lower, fwhite_upper, tol, niter, epsilon); + } + else { + throw TypeError_exception("Only float32 or float64 arrays are supported."); + } +} + +PYBINDINGS("so3g") +{ + bp::def("fit_noise", fit_noise, + "fit_noise(f, pxx, p, c, lowf, fwhite_lower, fwhite_upper, tol, niter, epsilon)" + "\n" + "Fits noise model with white and 1/f noise to the PSD of signal. Uses a MLE " + "method that minimizes a log likelihood. OMP is used to parallelize across dets (rows)." 
+ "\n" + "Args:\n" + " f: frequency array (float32/64) with dimensions (nsamps,)\n" + " pxx: PSD array (float32/64) with dimensions (nsamps,)\n" + " p: parameter array (float32/64) with dimensions (nparams,). This is modified in place.\n" + " c: uncertaintiy array (float32/64) with dimensions (nsamps,). This is modified in place.\n" + " lowf: Frequency below which estimate of 1/f noise index and knee are estimated for initial " + " guess passed to least_squares fit (float64).\n" + " fwhite_lower: Lower frequency used to estimate white noise for initial guess passed to " + " least_squares fit (float64).\n" + " fwhite_upper: Upper frequency used to estimate white noise for initial guess passed to " + " least_squares fit (float64).\n" + " tol: absolute tolerance for minimization (float64).\n" + " niter: total number of iterations to run minimization for (int).\n" + " epsilon: Value to perturb gradients by when calculating uncertainties with the inverse " + " Hessian matrix (float64).\n"); +} \ No newline at end of file From 47133b999b08c00591b088c6ad083d537feb3e2c Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Mon, 28 Oct 2024 13:03:26 -0700 Subject: [PATCH 02/13] fix Dockerfile typo --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a74de187..b287fa3d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,7 +17,7 @@ RUN apt update && apt install -y \ python-is-python3 \ libgoogle-glog-dev \ libgflags-dev \ - libeigen3-dev \ + libeigen3-dev # Set the working directory WORKDIR /app_lib/so3g From abb235a20c269f15c2c09691588e46c62bbec248 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Mon, 28 Oct 2024 13:21:02 -0700 Subject: [PATCH 03/13] adjustment for ceres git clone arguments --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index b287fa3d..39ca125a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,7 +23,7 @@ RUN apt update && apt install -y \ WORKDIR /app_lib/so3g # Fetch and install ceres-solver -RUN git clone https://ceres-solver.googlesource.com/ceres-solver +RUN git clone --recurse-submodules https://github.com/ceres-solver/ceres-solver WORKDIR /app_lib/so3g/ceres-solver RUN mkdir build && cd build && cmake .. 
&& make && make install From 9ed4ad3ea737868b427594d6d7329fc902d9e592 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Mon, 28 Oct 2024 14:02:09 -0700 Subject: [PATCH 04/13] adjust cmake files, fix missing header, add nproc to ceres-solver make --- CMakeLists.txt | 4 +-- Dockerfile | 6 ++++- cmake/FindCeres.cmake | 54 ++++++++++++++++------------------------ cmake/FindEigen3.cmake | 25 +++++++++++++++++++ cmake/FindGFlags.cmake | 23 +++++++++++++++++ cmake/FindGlog.cmake | 23 +++++++++++++++++ include/fitting_ops.h | 6 +++-- src/array_ops.cxx | 1 + test/test_fitting_ops.py | 51 +++++++++++++++++++++++++++++++++++++ 9 files changed, 156 insertions(+), 37 deletions(-) create mode 100644 cmake/FindEigen3.cmake create mode 100644 cmake/FindGFlags.cmake create mode 100644 cmake/FindGlog.cmake create mode 100644 test/test_fitting_ops.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 35c39c2c..75cc1439 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -90,8 +90,8 @@ target_link_libraries(so3g spt3g::core) target_include_directories(so3g PRIVATE ${GSL_INCLUDE_DIR}) target_link_libraries(so3g ${GSL_LIBRARIES}) # Link Ceres -target_include_directories(so3g PRIVATE ${CERES_INCLUDE_DIRS}) -target_link_libraries(so3g ${CERES_LIBRARIES} ${CERES_DEPENDENCIES}) +target_include_directories(so3g PRIVATE ${CERES_INCLUDE_DIR} ${EIGEN_INCLUDE_DIR}) +target_link_libraries(so3g ${CERES_LIBRARY} Eigen3::Eigen) # You probably want to select openblas, so pass -DBLA_VENDOR=OpenBLAS find_package(BLAS REQUIRED) diff --git a/Dockerfile b/Dockerfile index 39ca125a..281fb543 100644 --- a/Dockerfile +++ b/Dockerfile @@ -25,7 +25,11 @@ WORKDIR /app_lib/so3g # Fetch and install ceres-solver RUN git clone --recurse-submodules https://github.com/ceres-solver/ceres-solver WORKDIR /app_lib/so3g/ceres-solver -RUN mkdir build && cd build && cmake .. && make && make install +RUN mkdir build \ + && cd build \ + && cmake .. 
\ + && make -j$(nproc) \ + && make install # Set the working directory back to so3g WORKDIR /app_lib/so3g diff --git a/cmake/FindCeres.cmake b/cmake/FindCeres.cmake index 10a06ccc..5c8ba111 100644 --- a/cmake/FindCeres.cmake +++ b/cmake/FindCeres.cmake @@ -1,39 +1,29 @@ -# Try to find the Ceres Solver library +# - Find the Ceres Solver library +# +# CERES_FOUND +# CERES_INCLUDE_DIRS +# CERES_LIBRARIES +# CERES_LIBRARY_DIRS # Look for the Ceres package -find_path(CERES_INCLUDE_DIR ceres/ceres.h - PATH_SUFFIXES ceres -) - -find_library(CERES_LIBRARY ceres) +find_path(CERES_INCLUDE_DIR NAMES ceres/ceres.h HINTS ENV CERES_DIR PATH_SUFFIXES include) +find_library(CERES_LIBRARY NAMES ceres HINTS ENV CERES_DIR PATH_SUFFIXES lib) -# Check if Eigen is needed and available +# Get Eigen find_package(Eigen3 REQUIRED) +find_package(Glog REQUIRED) +find_package(Gflags REQUIRED) + +# Create the imported Ceres target +add_library(Ceres::Ceres UNKNOWN IMPORTED) +set_target_properties(Ceres::Ceres PROPERTIES + IMPORTED_LOCATION "${CERES_LIBRARY}" + INTERFACE_INCLUDE_DIRECTORIES "${CERES_INCLUDE_DIR};${EIGEN3_INCLUDE_DIR}" + INTERFACE_LINK_LIBRARIES "${GLOG_LIBRARIES};${GFLAGS_LIBRARIES};${CERES_LIBRARY}" +) -# Check if Ceres is found -if (CERES_INCLUDE_DIR AND CERES_LIBRARY AND TARGET Eigen3::Eigen) - # Ceres and Eigen were found - set(CERES_FOUND TRUE) - set(CERES_LIBRARIES ${CERES_LIBRARY}) - set(CERES_INCLUDE_DIRS ${CERES_INCLUDE_DIR} ${EIGEN3_INCLUDE_DIRS}) - - # Optionally, find other dependencies (like gflags and glog, which Ceres uses) - find_package(Glog REQUIRED) - find_package(Gflags REQUIRED) - - set(CERES_DEPENDENCIES ${GLOG_LIBRARIES} ${GFLAGS_LIBRARIES} Eigen3::Eigen) -else() - # Ceres or Eigen was not found - set(CERES_FOUND FALSE) -endif() +include (FindPackageHandleStandardArgs) +find_package_handle_standard_args (Ceres DEFAULT_MSG CERES_LIBRARY CERES_INCLUDE_DIR) # Set the results so they can be used by the project -mark_as_advanced(CERES_INCLUDE_DIR CERES_LIBRARY) - -# Provide an interface for usage -if (CERES_FOUND) - message(STATUS "Found Ceres: ${CERES_INCLUDE_DIR}") - message(STATUS "Found Eigen3: ${EIGEN3_INCLUDE_DIR}") -else() - message(WARNING "Could not find Ceres Solver or Eigen3") -endif() \ No newline at end of file +mark_as_advanced(CERES_INCLUDE_DIR CERES_LIBRARY) \ No newline at end of file diff --git a/cmake/FindEigen3.cmake b/cmake/FindEigen3.cmake new file mode 100644 index 00000000..93498abf --- /dev/null +++ b/cmake/FindEigen3.cmake @@ -0,0 +1,25 @@ +# - Find the Eigen library +# +# EIGEN_FOUND +# EIGEN_INCLUDE_DIRS +# EIGEN_LIBRARIES +# EIGEN_LIBRARY_DIRS + +if (EIGEN_INCLUDE_DIR) +# Already in cache, be silent +set (EIGEN_FIND_QUIETLY TRUE) +endif (EIGEN_INCLUDE_DIR) + +find_path(EIGEN_INCLUDE_DIR "Eigen/Core" + HINTS ENV EIGEN_DIR + PATH_SUFFIXES eigen3 +) + +add_library(Eigen3::Eigen INTERFACE IMPORTED) + set_target_properties(Eigen3::Eigen PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${EIGEN_INCLUDE_DIR}") + +include (FindPackageHandleStandardArgs) +find_package_handle_standard_args (Eigen3 DEFAULT_MSG EIGEN_INCLUDE_DIR) + +mark_as_advanced(EIGEN_INCLUDE_DIR) diff --git a/cmake/FindGFlags.cmake b/cmake/FindGFlags.cmake new file mode 100644 index 00000000..9473cf23 --- /dev/null +++ b/cmake/FindGFlags.cmake @@ -0,0 +1,23 @@ +# - Find GFLAGS +# +# GFLAGS_FOUND +# GFLAGS_INCLUDE_DIRS +# GFLAGS_LIBRARIES +# GFLAGS_LIBRARY_DIRS + +if (GFLAGS_INCLUDE_DIR) +# Already in cache, be silent +set (GFLAG_FIND_QUIETLY TRUE) +endif (GFLAGS_INCLUDE_DIR) + 
+find_path(GFLAGS_INCLUDE_DIR NAME gflags/gflags.h HINTS ENV GFLAGS_DIR PATH_SUFFIXES include) +find_library(GFLAGS_LIBRARY NAME gflags HINTS ENV GFLAGS_DIR PATH_SUFFIXES lib) + +set(GFLAGS_INCLUDE_DIRS ${GFLAGS_INCLUDE_DIR}) +set(GFLAGS_LIBRARIES ${GFLAGS_LIBRARY}) + +include (FindPackageHandleStandardArgs) +find_package_handle_standard_args (Gflags DEFAULT_MSG GFLAGS_LIBRARIES GFLAGS_INCLUDE_DIRS) + +mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE + GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR) \ No newline at end of file diff --git a/cmake/FindGlog.cmake b/cmake/FindGlog.cmake new file mode 100644 index 00000000..6b380c61 --- /dev/null +++ b/cmake/FindGlog.cmake @@ -0,0 +1,23 @@ +# - Find glog +# +# GLOG_FOUND +# GLOG_INCLUDE_DIRS +# GLOG_LIBRARIES +# GLOG_LIBRARY_DIRS + +if (GLOG_INCLUDE_DIR) +# Already in cache, be silent +set (GLOG_FIND_QUIETLY TRUE) +endif (GLOG_INCLUDE_DIR) + +find_path(GLOG_INCLUDE_DIR glog/logging.h HINTS ENV GLOG_DIR PATH_SUFFIXES include) +find_library(GLOG_LIBRARY NAME glog HINTS ENV GLOG_DIR PATH_SUFFIXES lib) + +set(GLOG_INCLUDE_DIRS ${GLOG_INCLUDE_DIR}) +set(GLOG_LIBRARIES ${GLOG_LIBRARY}) + +include (FindPackageHandleStandardArgs) +find_package_handle_standard_args (Glog DEFAULT_MSG GLOG_LIBRARY GLOG_INCLUDE_DIR) + +mark_as_advanced(GLOG_LIBRARY_DEBUG GLOG_LIBRARY_RELEASE + GLOG_LIBRARY GLOG_INCLUDE_DIR GLOG_ROOT_DIR) diff --git a/include/fitting_ops.h b/include/fitting_ops.h index 821dfe08..c8886062 100644 --- a/include/fitting_ops.h +++ b/include/fitting_ops.h @@ -78,7 +78,8 @@ struct CostFunction return true; } - static ceres::Problem create(const int n, const double* xx, const double* yy, double* p) + static ceres::Problem create(const int n, const double* xx, + const double* yy, double* p) { ceres::Problem problem; @@ -123,7 +124,8 @@ struct NegLogLikelihood return true; } - static ceres::FirstOrderFunction* create(int n, const double* xx, const double* yy) + static ceres::FirstOrderFunction* create(int n, const double* xx, + const double* yy) { // Ceres takes ownership of pointers so no cleanup is required return new ceres::AutoDiffFirstOrderFunction, diff --git a/src/array_ops.cxx b/src/array_ops.cxx index b7ea268b..65bdb8ca 100644 --- a/src/array_ops.cxx +++ b/src/array_ops.cxx @@ -16,6 +16,7 @@ extern "C" { #include #include +#include #include #include "so3g_numpy.h" diff --git a/test/test_fitting_ops.py b/test/test_fitting_ops.py new file mode 100644 index 00000000..313f8200 --- /dev/null +++ b/test/test_fitting_ops.py @@ -0,0 +1,51 @@ +import unittest + +import so3g +import numpy as np + +from scipy.optimize import minimize +import numdifftools as ndt + + +class TestFitting(unittest.TestCase): + """Test fitting noise model.""" + + def test_00_fit_noise(self): + + def noise_model(f, p): + fknee, w, alpha = p[0], p[1], p[2] + return w * (1 + (fknee / f) ** alpha) + + ndets = 3; + nsamps = 1024 // 2 + 1 # assume nperseg = 1024 for psd + dtype = "float32" + order = "C" + + lowf = 1. + fwhite = [10., 100.] 
+ + p0 = np.array([10., 2., 0.7]) # fk, w, alpha + nparams = len(p0) + + tol = 1e-8 # so3g minimization tolerance + niter = 200*nparams # so3g max iterations + epsilon = 1e-5 # so3g gradient perturbation epsilon (uncertainty calculation) + + f = np.linspace(0.01, 200., nsamps, dtype=dtype) + pxx = np.zeros((ndets, nsamps), dtype=dtype, order=order) + + for i in range(ndets): + pxx[i,:] = noise_model(f, p0) + + so3g_fitout = np.zeros((ndets, nparams),dtype=dtype, order=order) + so3g_covout = np.zeros((ndets, nparams),dtype=dtype, order=order) + + so3g.fit_noise(f, pxx, so3g_fitout, so3g_covout, lowf, fwhite[0], fwhite[1], tol, niter, epsilon) + + for i in range(ndets): + residual = np.abs(p0 - so3g_fitout[i]) / so3g_covout[i] + np.testing.assert_array_less(residual, 1.0) + + +if __name__ == "__main__": + unittest.main() \ No newline at end of file From 1e4934136d62bcfdb4b724411104fa2ddef00bb6 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Tue, 29 Oct 2024 07:08:20 -0700 Subject: [PATCH 05/13] update CMakeLists.txt and Dockerfile --- CMakeLists.txt | 3 +-- Dockerfile | 18 +++++------------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 75cc1439..03944455 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -90,8 +90,7 @@ target_link_libraries(so3g spt3g::core) target_include_directories(so3g PRIVATE ${GSL_INCLUDE_DIR}) target_link_libraries(so3g ${GSL_LIBRARIES}) # Link Ceres -target_include_directories(so3g PRIVATE ${CERES_INCLUDE_DIR} ${EIGEN_INCLUDE_DIR}) -target_link_libraries(so3g ${CERES_LIBRARY} Eigen3::Eigen) +target_link_libraries(so3g Ceres::ceres Eigen3::Eigen) # You probably want to select openblas, so pass -DBLA_VENDOR=OpenBLAS find_package(BLAS REQUIRED) diff --git a/Dockerfile b/Dockerfile index 281fb543..52bf16d7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -17,23 +17,15 @@ RUN apt update && apt install -y \ python-is-python3 \ libgoogle-glog-dev \ libgflags-dev \ - libeigen3-dev + libmetis-dev \ + libgtest-dev \ + libabsl-dev \ + libeigen3-dev \ + libceres-dev # Set the working directory WORKDIR /app_lib/so3g -# Fetch and install ceres-solver -RUN git clone --recurse-submodules https://github.com/ceres-solver/ceres-solver -WORKDIR /app_lib/so3g/ceres-solver -RUN mkdir build \ - && cd build \ - && cmake .. \ - && make -j$(nproc) \ - && make install - -# Set the working directory back to so3g -WORKDIR /app_lib/so3g - # Copy the current directory contents into the container ADD . 
/app_lib/so3g From 058bad8e8cf179cb89549d6e2679f3c1afa407b4 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Tue, 29 Oct 2024 07:53:06 -0700 Subject: [PATCH 06/13] specify ceres-solver 2.2 in Dockerfile --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 52bf16d7..d2c82978 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,7 +21,7 @@ RUN apt update && apt install -y \ libgtest-dev \ libabsl-dev \ libeigen3-dev \ - libceres-dev + libceres-dev=2.2.0+dfsg-4.1ubuntu2 # Set the working directory WORKDIR /app_lib/so3g From 7d530ef1a5e165bf3337f92e27cb54c2840ff1d2 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Tue, 29 Oct 2024 08:11:38 -0700 Subject: [PATCH 07/13] try building ceres-solver v2.2 --- Dockerfile | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index d2c82978..f8087ded 100644 --- a/Dockerfile +++ b/Dockerfile @@ -21,11 +21,24 @@ RUN apt update && apt install -y \ libgtest-dev \ libabsl-dev \ libeigen3-dev \ - libceres-dev=2.2.0+dfsg-4.1ubuntu2 # Set the working directory WORKDIR /app_lib/so3g +# Fetch and install ceres-solver +RUN git clone --depth 1 --branch 2.2.0 --recurse-submodules https://github.com/ceres-solver/ceres-solver + +WORKDIR /app_lib/so3g/ceres-solver + +RUN mkdir build \ + && cd build \ + && cmake .. \ + && make -j$(nproc) \ + && make install + +# Set the working directory back to so3g +WORKDIR /app_lib/so3g + # Copy the current directory contents into the container ADD . /app_lib/so3g From 76c91e49b34b1d6625577734aec95782bc087404 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Tue, 29 Oct 2024 08:12:55 -0700 Subject: [PATCH 08/13] fix typo --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index f8087ded..29fb0e45 100644 --- a/Dockerfile +++ b/Dockerfile @@ -20,7 +20,7 @@ RUN apt update && apt install -y \ libmetis-dev \ libgtest-dev \ libabsl-dev \ - libeigen3-dev \ + libeigen3-dev # Set the working directory WORKDIR /app_lib/so3g From b3062e3b1ef429e231159c12d329f77f8e0944bc Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Tue, 29 Oct 2024 08:35:23 -0700 Subject: [PATCH 09/13] remove unneeded imports from tests --- test/test_fitting_ops.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/test/test_fitting_ops.py b/test/test_fitting_ops.py index 313f8200..ff37725f 100644 --- a/test/test_fitting_ops.py +++ b/test/test_fitting_ops.py @@ -3,9 +3,6 @@ import so3g import numpy as np -from scipy.optimize import minimize -import numdifftools as ndt - class TestFitting(unittest.TestCase): """Test fitting noise model.""" From 9e4b86c54104a8ae27d7d22ae1921b72e006d99f Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Tue, 29 Oct 2024 10:47:49 -0700 Subject: [PATCH 10/13] remove tests from ceres build. improve fit_noise docstring --- Dockerfile | 2 +- cmake/FindCeres.cmake | 2 +- cmake/FindEigen3.cmake | 4 +-- src/fitting_ops.cxx | 77 ++++++++++++++++++++-------------------- test/test_fitting_ops.py | 14 ++++---- 5 files changed, 51 insertions(+), 48 deletions(-) diff --git a/Dockerfile b/Dockerfile index 29fb0e45..8b2d746b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,7 +32,7 @@ WORKDIR /app_lib/so3g/ceres-solver RUN mkdir build \ && cd build \ - && cmake .. \ + && cmake .. 
-DBUILD_TESTING=OFF \ && make -j$(nproc) \ && make install diff --git a/cmake/FindCeres.cmake b/cmake/FindCeres.cmake index 5c8ba111..686c4463 100644 --- a/cmake/FindCeres.cmake +++ b/cmake/FindCeres.cmake @@ -9,7 +9,7 @@ find_path(CERES_INCLUDE_DIR NAMES ceres/ceres.h HINTS ENV CERES_DIR PATH_SUFFIXES include) find_library(CERES_LIBRARY NAMES ceres HINTS ENV CERES_DIR PATH_SUFFIXES lib) -# Get Eigen +# Get Dependencies find_package(Eigen3 REQUIRED) find_package(Glog REQUIRED) find_package(Gflags REQUIRED) diff --git a/cmake/FindEigen3.cmake b/cmake/FindEigen3.cmake index 93498abf..303863c7 100644 --- a/cmake/FindEigen3.cmake +++ b/cmake/FindEigen3.cmake @@ -16,8 +16,8 @@ find_path(EIGEN_INCLUDE_DIR "Eigen/Core" ) add_library(Eigen3::Eigen INTERFACE IMPORTED) - set_target_properties(Eigen3::Eigen PROPERTIES - INTERFACE_INCLUDE_DIRECTORIES "${EIGEN_INCLUDE_DIR}") +set_target_properties(Eigen3::Eigen PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${EIGEN_INCLUDE_DIR}") include (FindPackageHandleStandardArgs) find_package_handle_standard_args (Eigen3 DEFAULT_MSG EIGEN_INCLUDE_DIR) diff --git a/src/fitting_ops.cxx b/src/fitting_ops.cxx index add4bb03..95a05f2a 100644 --- a/src/fitting_ops.cxx +++ b/src/fitting_ops.cxx @@ -174,15 +174,15 @@ void _minimize(const double* x, const double* y, double* p, const int n, } else { for (int i = 0; i < Likelihood::model::nparams; ++i) { - c[i] = std::numeric_limits::quiet_NaN() ; + c[i] = std::numeric_limits::quiet_NaN(); } } } } else { for (int i = 0; i < Likelihood::model::nparams; ++i) { - p[i] = std::numeric_limits::quiet_NaN() ; - c[i] = std::numeric_limits::quiet_NaN() ; + p[i] = std::numeric_limits::quiet_NaN(); + c[i] = std::numeric_limits::quiet_NaN(); } } } @@ -303,15 +303,22 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, T* c_data = (T*)c_buf->buf; const int c_stride = c_buf->strides[0] / sizeof(T); - if constexpr (std::is_same::value) { - // Get frequency bounds and log(f) + + if constexpr (std::is_same::value) { + // Copy f to double + double f_double[nsamps]; + + std::transform(f_data, f_data + nsamps, f_double, + [](float value) { return static_cast(value); }); + + // Get frequency bounds auto [lowf_i, fwhite_i, fwhite_size] = - _get_frequency_limits(f_data, lowf, fwhite_lower, fwhite_upper, nsamps); + _get_frequency_limits(f_double, lowf, fwhite_lower, fwhite_upper, nsamps); // Fit in logspace double log_f[nsamps]; for (int i = 0; i < nsamps; ++i) { - log_f[i] = std::log10(f_data[i]); + log_f[i] = std::log10(f_double[i]); } #pragma omp parallel for @@ -320,30 +327,30 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, int poff = i * p_stride; int coff = i * c_stride; - double* pxx_det = pxx_data + ioff; + // Copy pxx row to double + double pxx_det[nsamps]; + + std::transform(pxx_data + ioff, pxx_data + ioff + nsamps, pxx_det, + [](float value) { return static_cast(value); }); + + // Cast to double implicitly on assignment T* p_det = p_data + poff; T* c_det = c_data + coff; - _fit_noise(f_data, log_f, pxx_det, p_det, c_det, - ndets, nsamps, lowf_i, fwhite_i, fwhite_size, - tol, niter, epsilon); + _fit_noise(f_double, log_f, pxx_det, p_det, + c_det, ndets, nsamps, lowf_i, fwhite_i, + fwhite_size, tol, niter, epsilon); } } - else if constexpr (std::is_same::value) { - // Copy f to double - double f_double[nsamps]; - - std::transform(f_data, f_data + nsamps, f_double, - [](float value) { return static_cast(value); }); - - // Get frequency bounds and log(f) + else if constexpr (std::is_same::value) { + 
// Get frequency bounds auto [lowf_i, fwhite_i, fwhite_size] = - _get_frequency_limits(f_double, lowf, fwhite_lower, fwhite_upper, nsamps); + _get_frequency_limits(f_data, lowf, fwhite_lower, fwhite_upper, nsamps); // Fit in logspace double log_f[nsamps]; for (int i = 0; i < nsamps; ++i) { - log_f[i] = std::log10(f_double[i]); + log_f[i] = std::log10(f_data[i]); } #pragma omp parallel for @@ -352,19 +359,13 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, int poff = i * p_stride; int coff = i * c_stride; - // Copy pxx row to double - double pxx_det[nsamps]; - - std::transform(pxx_data + ioff, pxx_data + ioff + nsamps, pxx_det, - [](float value) { return static_cast(value); }); - - // Cast implicitly on assignment + double* pxx_det = pxx_data + ioff; T* p_det = p_data + poff; T* c_det = c_data + coff; - _fit_noise(f_double, log_f, pxx_det, p_det, - c_det, ndets, nsamps, lowf_i, fwhite_i, - fwhite_size, tol, niter, epsilon); + _fit_noise(f_data, log_f, pxx_det, p_det, c_det, + ndets, nsamps, lowf_i, fwhite_i, fwhite_size, + tol, niter, epsilon); } } } @@ -392,15 +393,15 @@ PYBINDINGS("so3g") bp::def("fit_noise", fit_noise, "fit_noise(f, pxx, p, c, lowf, fwhite_lower, fwhite_upper, tol, niter, epsilon)" "\n" - "Fits noise model with white and 1/f noise to the PSD of signal. Uses a MLE " + "Fits noise model with white and 1/f components to the PSD of signal. Uses a MLE " "method that minimizes a log likelihood. OMP is used to parallelize across dets (rows)." "\n" "Args:\n" - " f: frequency array (float32/64) with dimensions (nsamps,)\n" - " pxx: PSD array (float32/64) with dimensions (nsamps,)\n" - " p: parameter array (float32/64) with dimensions (nparams,). This is modified in place.\n" - " c: uncertaintiy array (float32/64) with dimensions (nsamps,). This is modified in place.\n" - " lowf: Frequency below which estimate of 1/f noise index and knee are estimated for initial " + " f: frequency array (float32/64) with dimensions (nsamps,).\n" + " pxx: PSD array (float32/64) with dimensions (ndets, nsamps).\n" + " p: parameter array (float32/64) with dimensions (ndets, nparams). This is modified in place.\n" + " c: uncertaintiy array (float32/64) with dimensions (ndets, nparams). This is modified in place.\n" + " lowf: Frequency below which the 1/f noise index and fknee are estimated for initial " " guess passed to least_squares fit (float64).\n" " fwhite_lower: Lower frequency used to estimate white noise for initial guess passed to " " least_squares fit (float64).\n" @@ -409,5 +410,5 @@ PYBINDINGS("so3g") " tol: absolute tolerance for minimization (float64).\n" " niter: total number of iterations to run minimization for (int).\n" " epsilon: Value to perturb gradients by when calculating uncertainties with the inverse " - " Hessian matrix (float64).\n"); + " Hessian matrix (float64). Affects minimization only.\n"); } \ No newline at end of file diff --git a/test/test_fitting_ops.py b/test/test_fitting_ops.py index ff37725f..38391eca 100644 --- a/test/test_fitting_ops.py +++ b/test/test_fitting_ops.py @@ -14,19 +14,19 @@ def noise_model(f, p): return w * (1 + (fknee / f) ** alpha) ndets = 3; - nsamps = 1024 // 2 + 1 # assume nperseg = 1024 for psd + nsamps = 1024 // 2 + 1 # Assume nperseg = 1024 for psd dtype = "float32" order = "C" lowf = 1. fwhite = [10., 100.] 
- p0 = np.array([10., 2., 0.7]) # fk, w, alpha + p0 = np.array([10., 2., 0.7]) # fknee, w, alpha nparams = len(p0) tol = 1e-8 # so3g minimization tolerance niter = 200*nparams # so3g max iterations - epsilon = 1e-5 # so3g gradient perturbation epsilon (uncertainty calculation) + epsilon = 1e-5 # so3g gradient perturbation epsilon f = np.linspace(0.01, 200., nsamps, dtype=dtype) pxx = np.zeros((ndets, nsamps), dtype=dtype, order=order) @@ -34,11 +34,13 @@ def noise_model(f, p): for i in range(ndets): pxx[i,:] = noise_model(f, p0) - so3g_fitout = np.zeros((ndets, nparams),dtype=dtype, order=order) - so3g_covout = np.zeros((ndets, nparams),dtype=dtype, order=order) + so3g_params = np.zeros((ndets, nparams), dtype=dtype, order=order) + so3g_sigmas = np.zeros((ndets, nparams), dtype=dtype, order=order) - so3g.fit_noise(f, pxx, so3g_fitout, so3g_covout, lowf, fwhite[0], fwhite[1], tol, niter, epsilon) + so3g.fit_noise(f, pxx, so3g_params, so3g_sigmas, lowf, fwhite[0], + fwhite[1], tol, niter, so3g_sigmas) + # Check if fitted parameters deviate from input by more than 1-sigma for i in range(ndets): residual = np.abs(p0 - so3g_fitout[i]) / so3g_covout[i] np.testing.assert_array_less(residual, 1.0) From b25095e87740cff6713234a375bcf1b776216178 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Tue, 29 Oct 2024 11:08:20 -0700 Subject: [PATCH 11/13] fix test --- test/test_fitting_ops.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/test_fitting_ops.py b/test/test_fitting_ops.py index 38391eca..2fa18c80 100644 --- a/test/test_fitting_ops.py +++ b/test/test_fitting_ops.py @@ -38,11 +38,11 @@ def noise_model(f, p): so3g_sigmas = np.zeros((ndets, nparams), dtype=dtype, order=order) so3g.fit_noise(f, pxx, so3g_params, so3g_sigmas, lowf, fwhite[0], - fwhite[1], tol, niter, so3g_sigmas) + fwhite[1], tol, niter, epsilon) # Check if fitted parameters deviate from input by more than 1-sigma for i in range(ndets): - residual = np.abs(p0 - so3g_fitout[i]) / so3g_covout[i] + residual = np.abs(p0 - so3g_params[i]) / so3g_sigmas[i] np.testing.assert_array_less(residual, 1.0) From 4f55e4196ec90df978e5bae41db7dbdfa54eb08f Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Mon, 4 Nov 2024 07:06:20 -0800 Subject: [PATCH 12/13] improve test, fix pointers, add bounds, disable logging --- src/fitting_ops.cxx | 176 +++++++++++++++++++++++++-------------- test/test_fitting_ops.py | 25 +++++- 2 files changed, 133 insertions(+), 68 deletions(-) diff --git a/src/fitting_ops.cxx b/src/fitting_ops.cxx index 95a05f2a..399cd1ad 100644 --- a/src/fitting_ops.cxx +++ b/src/fitting_ops.cxx @@ -11,6 +11,7 @@ #include #include +#include #include #include "so3g_numpy.h" @@ -19,9 +20,11 @@ #include "array_ops.h" #include "fitting_ops.h" + template void _least_squares(const double* x, const double* y, double* p, const int n, - const int nthreads=1, double* c=nullptr) + const int nthreads=1, const double* lower_bounds=nullptr, + const double* upper_bounds=nullptr, double* c=nullptr) { // Set up ceres problem ceres::Problem problem = CostFunction::create(n, x, y, p); @@ -31,6 +34,22 @@ void _least_squares(const double* x, const double* y, double* p, const int n, options.logging_type = ceres::LoggingType::SILENT; options.minimizer_progress_to_stdout = false; options.num_threads = nthreads; + + // Set bounds + if (lower_bounds) { + for (int i = 0; i < CostFunction::model::nparams; ++i) { + if (!std::isnan(lower_bounds[i])) { + problem.SetParameterLowerBound(p, i, lower_bounds[i]); + } + } + } + if 
(upper_bounds) { + for (int i = 0; i < CostFunction::model::nparams; ++i) { + if (!std::isnan(upper_bounds[i])) { + problem.SetParameterUpperBound(p, i, upper_bounds[i]); + } + } + } ceres::Solver::Summary summary; ceres::Solve(options, &problem, &summary); @@ -61,7 +80,9 @@ void _least_squares(const double* x, const double* y, double* p, const int n, else { for (int i = 0; i < CostFunction::model::nparams; ++i) { p[i] = std::numeric_limits::quiet_NaN(); - c[i] = std::numeric_limits::quiet_NaN(); + if (c) { + c[i] = std::numeric_limits::quiet_NaN(); + } } } } @@ -114,22 +135,27 @@ template void _compute_hessian(ceres::GradientProblem& problem, double* p, double* hessian, const double epsilon=1e-5) { - double gradient[Likelihood::model::nparams]; + double gradient_plus[Likelihood::model::nparams]; + double gradient_minus[Likelihood::model::nparams]; double perturbed_parameters[Likelihood::model::nparams]; - double perturbed_gradient[Likelihood::model::nparams]; double cost; - problem.Evaluate(p, &cost, gradient); - + // Loop over each parameter for (int i = 0; i < Likelihood::model::nparams; ++i) { + // Perturb in the positive direction std::copy(p, p + Likelihood::model::nparams, perturbed_parameters); perturbed_parameters[i] += epsilon; - problem.Evaluate(perturbed_parameters, &cost, perturbed_gradient); + problem.Evaluate(perturbed_parameters, &cost, gradient_plus); + + // Perturb in the negative direction + std::copy(p, p + Likelihood::model::nparams, perturbed_parameters); + perturbed_parameters[i] -= epsilon; + problem.Evaluate(perturbed_parameters, &cost, gradient_minus); - // Second derivative + // Second derivative approximation for (int j = 0; j < Likelihood::model::nparams; ++j) { hessian[i * Likelihood::model::nparams + j] = - (perturbed_gradient[j] - gradient[j]) / epsilon; + (gradient_plus[j] - gradient_minus[j]) / (2 * epsilon); } } } @@ -182,7 +208,9 @@ void _minimize(const double* x, const double* y, double* p, const int n, else { for (int i = 0; i < Likelihood::model::nparams; ++i) { p[i] = std::numeric_limits::quiet_NaN(); - c[i] = std::numeric_limits::quiet_NaN(); + if (c) { + c[i] = std::numeric_limits::quiet_NaN(); + } } } } @@ -240,32 +268,48 @@ void _fit_noise(const double* f, const double* log_f, const double* pxx, double wnest = _calculate_median(pxx + fwhite_i[0], fwhite_size); // Fit 1/f to line in logspace - double pfit[2] = {1., 1.}; - _least_squares(log_f, log_pxx, pfit, lowf_i + 1, 1); + double pfit[2] = {1., -1.}; + + // Some weak bounds (w > 0, alpha < 0) + double lower_bounds[2] = {0., std::numeric_limits::quiet_NaN()}; + double upper_bounds[2] = {std::numeric_limits::quiet_NaN(), 0.}; - // Find guess for fknee - int fi = 0; - double min_f = std::numeric_limits::max(); - for (int i = 0; i < nsamps; ++i) { - double model_val = CostFunc::model::eval(log_f[i], pfit); - double diff = std::abs(std::pow(10., model_val) - wnest); - if (diff <= min_f) { - min_f = diff; - fi = i; + _least_squares(log_f, log_pxx, pfit, lowf_i + 1, 1, + lower_bounds, upper_bounds); + + // Don't run minimization if least squares fit failed + if (std::isnan(pfit[0]) || std::isnan(pfit[1])) { + // Implict cast from double to T + for (int i = 0; i < Likelihood::model::nparams; ++i) { + p[i] = std::numeric_limits::quiet_NaN(); + c[i] = std::numeric_limits::quiet_NaN(); } } + else { + // Find guess for fknee + int fi = 0; + double min_f = std::numeric_limits::max(); + for (int i = 0; i < nsamps; ++i) { + double model_val = CostFunc::model::eval(log_f[i], pfit); + double diff = 
std::abs(std::pow(10., model_val) - wnest); + if (diff <= min_f) { + min_f = diff; + fi = i; + } + } - // Initial parameters - double p0[Likelihood::model::nparams] = {f[fi], wnest, -pfit[1]}; - double c0[Likelihood::model::nparams]; + // Initial parameters + double p0[Likelihood::model::nparams] = {f[fi], wnest, -pfit[1]}; + double c0[Likelihood::model::nparams]; - // Minimize - _minimize(f, pxx, p0, nsamps, tol, niter, c0, epsilon); + // Minimize + _minimize(f, pxx, p0, nsamps, tol, niter, c0, epsilon); - // Implict cast from double to T - for (int i = 0; i < Likelihood::model::nparams; ++i) { - p[i] = p0[i]; - c[i] = c0[i]; + // Implict cast from double to T + for (int i = 0; i < Likelihood::model::nparams; ++i) { + p[i] = p0[i]; + c[i] = c0[i]; + } } } @@ -277,6 +321,11 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, { using Likelihood = NegLogLikelihood; using CostFunc = CostFunction>; + + // Disable google logging to prevent failed fit warnings for each detector + google::InitGoogleLogging("logger"); + FLAGS_minloglevel = google::FATAL + 1; + FLAGS_logtostderr = false; BufferWrapper pxx_buf ("pxx", pxx, false, std::vector{-1, -1}); if (pxx_buf->strides[1] != pxx_buf->itemsize) @@ -303,22 +352,15 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, T* c_data = (T*)c_buf->buf; const int c_stride = c_buf->strides[0] / sizeof(T); - - if constexpr (std::is_same::value) { - // Copy f to double - double f_double[nsamps]; - - std::transform(f_data, f_data + nsamps, f_double, - [](float value) { return static_cast(value); }); - + if constexpr (std::is_same::value) { // Get frequency bounds auto [lowf_i, fwhite_i, fwhite_size] = - _get_frequency_limits(f_double, lowf, fwhite_lower, fwhite_upper, nsamps); + _get_frequency_limits(f_data, lowf, fwhite_lower, fwhite_upper, nsamps); // Fit in logspace double log_f[nsamps]; for (int i = 0; i < nsamps; ++i) { - log_f[i] = std::log10(f_double[i]); + log_f[i] = std::log10(f_data[i]); } #pragma omp parallel for @@ -327,30 +369,30 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, int poff = i * p_stride; int coff = i * c_stride; - // Copy pxx row to double - double pxx_det[nsamps]; - - std::transform(pxx_data + ioff, pxx_data + ioff + nsamps, pxx_det, - [](float value) { return static_cast(value); }); - - // Cast to double implicitly on assignment + double* pxx_det = pxx_data + ioff; T* p_det = p_data + poff; T* c_det = c_data + coff; - _fit_noise(f_double, log_f, pxx_det, p_det, - c_det, ndets, nsamps, lowf_i, fwhite_i, - fwhite_size, tol, niter, epsilon); + _fit_noise(f_data, log_f, pxx_det, p_det, c_det, + ndets, nsamps, lowf_i, fwhite_i, fwhite_size, + tol, niter, epsilon); } } - else if constexpr (std::is_same::value) { + else if constexpr (std::is_same::value) { + // Copy f to double + double f_double[nsamps]; + + std::transform(f_data, f_data + nsamps, f_double, + [](float value) { return static_cast(value); }); + // Get frequency bounds auto [lowf_i, fwhite_i, fwhite_size] = - _get_frequency_limits(f_data, lowf, fwhite_lower, fwhite_upper, nsamps); + _get_frequency_limits(f_double, lowf, fwhite_lower, fwhite_upper, nsamps); // Fit in logspace double log_f[nsamps]; for (int i = 0; i < nsamps; ++i) { - log_f[i] = std::log10(f_data[i]); + log_f[i] = std::log10(f_double[i]); } #pragma omp parallel for @@ -359,13 +401,19 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, int poff = i * p_stride; int coff = i * c_stride; - double* pxx_det = pxx_data + ioff; + // Copy 
pxx row to double + double pxx_det[nsamps]; + + std::transform(pxx_data + ioff, pxx_data + ioff + nsamps, pxx_det, + [](float value) { return static_cast(value); }); + + // Cast implicitly on assignment T* p_det = p_data + poff; T* c_det = c_data + coff; - _fit_noise(f_data, log_f, pxx_det, p_det, c_det, - ndets, nsamps, lowf_i, fwhite_i, fwhite_size, - tol, niter, epsilon); + _fit_noise(f_double, log_f, pxx_det, p_det, + c_det, ndets, nsamps, lowf_i, fwhite_i, + fwhite_size, tol, niter, epsilon); } } } @@ -393,15 +441,15 @@ PYBINDINGS("so3g") bp::def("fit_noise", fit_noise, "fit_noise(f, pxx, p, c, lowf, fwhite_lower, fwhite_upper, tol, niter, epsilon)" "\n" - "Fits noise model with white and 1/f components to the PSD of signal. Uses a MLE " + "Fits noise model with white and 1/f noise to the PSD of signal. Uses a MLE " "method that minimizes a log likelihood. OMP is used to parallelize across dets (rows)." "\n" "Args:\n" - " f: frequency array (float32/64) with dimensions (nsamps,).\n" - " pxx: PSD array (float32/64) with dimensions (ndets, nsamps).\n" - " p: parameter array (float32/64) with dimensions (ndets, nparams). This is modified in place.\n" - " c: uncertaintiy array (float32/64) with dimensions (ndets, nparams). This is modified in place.\n" - " lowf: Frequency below which the 1/f noise index and fknee are estimated for initial " + " f: frequency array (float32/64) with dimensions (nsamps,)\n" + " pxx: PSD array (float32/64) with dimensions (nsamps,)\n" + " p: parameter array (float32/64) with dimensions (nparams,). This is modified in place.\n" + " c: uncertaintiy array (float32/64) with dimensions (nsamps,). This is modified in place.\n" + " lowf: Frequency below which estimate of 1/f noise index and knee are estimated for initial " " guess passed to least_squares fit (float64).\n" " fwhite_lower: Lower frequency used to estimate white noise for initial guess passed to " " least_squares fit (float64).\n" @@ -410,5 +458,5 @@ PYBINDINGS("so3g") " tol: absolute tolerance for minimization (float64).\n" " niter: total number of iterations to run minimization for (int).\n" " epsilon: Value to perturb gradients by when calculating uncertainties with the inverse " - " Hessian matrix (float64). 
Affects minimization only.\n"); + " Hessian matrix (float64).\n"); } \ No newline at end of file diff --git a/test/test_fitting_ops.py b/test/test_fitting_ops.py index 2fa18c80..c7d5d69c 100644 --- a/test/test_fitting_ops.py +++ b/test/test_fitting_ops.py @@ -2,6 +2,7 @@ import so3g import numpy as np +from scipy.stats import chi2 class TestFitting(unittest.TestCase): @@ -13,7 +14,7 @@ def noise_model(f, p): fknee, w, alpha = p[0], p[1], p[2] return w * (1 + (fknee / f) ** alpha) - ndets = 3; + ndets = 1; nsamps = 1024 // 2 + 1 # Assume nperseg = 1024 for psd dtype = "float32" order = "C" @@ -39,11 +40,27 @@ def noise_model(f, p): so3g.fit_noise(f, pxx, so3g_params, so3g_sigmas, lowf, fwhite[0], fwhite[1], tol, niter, epsilon) + + ddof = nsamps - nparams + cval = chi2.ppf(0.95, ddof) - # Check if fitted parameters deviate from input by more than 1-sigma for i in range(ndets): - residual = np.abs(p0 - so3g_params[i]) / so3g_sigmas[i] - np.testing.assert_array_less(residual, 1.0) + fk, w, alpha = so3g_params[i] + sfk, sw, salpha = so3g_sigmas[i] + + # Calculate model error + fkf = (1 + fk / f) + + dmdw = fkf**alpha + dmdfk = w * alpha * fkf**(alpha - 1) * f**-1 + dmdalpha = w * (fkf**alpha) * np.log(alpha) + + err = np.sqrt((dmdw*sw)**2 + (dmdfk*sfk)**2 + (dmdalpha*salpha)**2) + + model = noise_model(f, so3g_params[i]) + chi_sq = np.sum((pxx[i] - model)**2 / (err[i])**2) + + np.testing.assert_array_less(chi_sq, cval) if __name__ == "__main__": From 8416c1bfde1d0aabdf95b438d0093728dbe8b588 Mon Sep 17 00:00:00 2001 From: Michael McCrackan Date: Mon, 4 Nov 2024 07:23:11 -0800 Subject: [PATCH 13/13] update docstring --- cmake/FindGFlags.cmake | 2 +- src/fitting_ops.cxx | 67 +++++++++++++++++++++--------------------- 2 files changed, 35 insertions(+), 34 deletions(-) diff --git a/cmake/FindGFlags.cmake b/cmake/FindGFlags.cmake index 9473cf23..6b2197f9 100644 --- a/cmake/FindGFlags.cmake +++ b/cmake/FindGFlags.cmake @@ -20,4 +20,4 @@ include (FindPackageHandleStandardArgs) find_package_handle_standard_args (Gflags DEFAULT_MSG GFLAGS_LIBRARIES GFLAGS_INCLUDE_DIRS) mark_as_advanced(GFLAGS_LIBRARY_DEBUG GFLAGS_LIBRARY_RELEASE - GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR) \ No newline at end of file + GFLAGS_LIBRARY GFLAGS_INCLUDE_DIR GFLAGS_ROOT_DIR) \ No newline at end of file diff --git a/src/fitting_ops.cxx b/src/fitting_ops.cxx index 399cd1ad..8d09731d 100644 --- a/src/fitting_ops.cxx +++ b/src/fitting_ops.cxx @@ -352,15 +352,21 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, T* c_data = (T*)c_buf->buf; const int c_stride = c_buf->strides[0] / sizeof(T); - if constexpr (std::is_same::value) { + if constexpr (std::is_same::value) { + // Copy f to double + double f_double[nsamps]; + + std::transform(f_data, f_data + nsamps, f_double, + [](float value) { return static_cast(value); }); + // Get frequency bounds auto [lowf_i, fwhite_i, fwhite_size] = - _get_frequency_limits(f_data, lowf, fwhite_lower, fwhite_upper, nsamps); + _get_frequency_limits(f_double, lowf, fwhite_lower, fwhite_upper, nsamps); // Fit in logspace double log_f[nsamps]; for (int i = 0; i < nsamps; ++i) { - log_f[i] = std::log10(f_data[i]); + log_f[i] = std::log10(f_double[i]); } #pragma omp parallel for @@ -369,30 +375,30 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, int poff = i * p_stride; int coff = i * c_stride; - double* pxx_det = pxx_data + ioff; + // Copy pxx row to double + double pxx_det[nsamps]; + + std::transform(pxx_data + ioff, pxx_data + ioff + nsamps, pxx_det, 
+ [](float value) { return static_cast(value); }); + + // Cast implicitly on assignment T* p_det = p_data + poff; T* c_det = c_data + coff; - _fit_noise(f_data, log_f, pxx_det, p_det, c_det, - ndets, nsamps, lowf_i, fwhite_i, fwhite_size, - tol, niter, epsilon); + _fit_noise(f_double, log_f, pxx_det, p_det, + c_det, ndets, nsamps, lowf_i, fwhite_i, + fwhite_size, tol, niter, epsilon); } } - else if constexpr (std::is_same::value) { - // Copy f to double - double f_double[nsamps]; - - std::transform(f_data, f_data + nsamps, f_double, - [](float value) { return static_cast(value); }); - + else if constexpr (std::is_same::value) { // Get frequency bounds auto [lowf_i, fwhite_i, fwhite_size] = - _get_frequency_limits(f_double, lowf, fwhite_lower, fwhite_upper, nsamps); + _get_frequency_limits(f_data, lowf, fwhite_lower, fwhite_upper, nsamps); // Fit in logspace double log_f[nsamps]; for (int i = 0; i < nsamps; ++i) { - log_f[i] = std::log10(f_double[i]); + log_f[i] = std::log10(f_data[i]); } #pragma omp parallel for @@ -401,19 +407,13 @@ void _fit_noise_buffer(const bp::object & f, const bp::object & pxx, int poff = i * p_stride; int coff = i * c_stride; - // Copy pxx row to double - double pxx_det[nsamps]; - - std::transform(pxx_data + ioff, pxx_data + ioff + nsamps, pxx_det, - [](float value) { return static_cast(value); }); - - // Cast implicitly on assignment + double* pxx_det = pxx_data + ioff; T* p_det = p_data + poff; T* c_det = c_data + coff; - _fit_noise(f_double, log_f, pxx_det, p_det, - c_det, ndets, nsamps, lowf_i, fwhite_i, - fwhite_size, tol, niter, epsilon); + _fit_noise(f_data, log_f, pxx_det, p_det, c_det, + ndets, nsamps, lowf_i, fwhite_i, fwhite_size, + tol, niter, epsilon); } } } @@ -436,20 +436,21 @@ void fit_noise(const bp::object & f, const bp::object & pxx, bp::object & p, bp: } } + PYBINDINGS("so3g") { bp::def("fit_noise", fit_noise, "fit_noise(f, pxx, p, c, lowf, fwhite_lower, fwhite_upper, tol, niter, epsilon)" "\n" - "Fits noise model with white and 1/f noise to the PSD of signal. Uses a MLE " + "Fits noise model with white and 1/f components to the PSD of signal. Uses a MLE " "method that minimizes a log likelihood. OMP is used to parallelize across dets (rows)." "\n" "Args:\n" - " f: frequency array (float32/64) with dimensions (nsamps,)\n" - " pxx: PSD array (float32/64) with dimensions (nsamps,)\n" - " p: parameter array (float32/64) with dimensions (nparams,). This is modified in place.\n" - " c: uncertaintiy array (float32/64) with dimensions (nsamps,). This is modified in place.\n" - " lowf: Frequency below which estimate of 1/f noise index and knee are estimated for initial " + " f: frequency array (float32/64) with dimensions (nsamps,).\n" + " pxx: PSD array (float32/64) with dimensions (ndets, nsamps).\n" + " p: parameter array (float32/64) with dimensions (ndets, nparams). This is modified in place.\n" + " c: uncertaintiy array (float32/64) with dimensions (ndets, nparams). 
This is modified in place.\n" + " lowf: Frequency below which the 1/f noise index and fknee are estimated for initial " " guess passed to least_squares fit (float64).\n" " fwhite_lower: Lower frequency used to estimate white noise for initial guess passed to " " least_squares fit (float64).\n" @@ -458,5 +459,5 @@ PYBINDINGS("so3g") " tol: absolute tolerance for minimization (float64).\n" " niter: total number of iterations to run minimization for (int).\n" " epsilon: Value to perturb gradients by when calculating uncertainties with the inverse " - " Hessian matrix (float64).\n"); + " Hessian matrix (float64). Affects minimization only.\n"); } \ No newline at end of file
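
Usage sketch for the new fit_noise binding. This is illustrative only and mirrors the docstring in src/fitting_ops.cxx and test/test_fitting_ops.py rather than documenting new behavior: the fitter models each PSD row as m(f) = w * (1 + (fknee / f)**alpha), minimizes sum_i [ log m(f_i) + pxx_i / m(f_i) ] per detector, and seeds the minimization with a log-space least-squares fit below lowf plus a median white-noise estimate between fwhite_lower and fwhite_upper. A minimal call, assuming so3g is built with the Ceres support added above:

    import numpy as np
    import so3g

    ndets, nparams = 3, 3                    # parameters come back as (fknee, w, alpha)
    nsamps = 1024 // 2 + 1                   # e.g. a PSD computed with nperseg = 1024
    f = np.linspace(0.01, 200., nsamps, dtype="float32")

    # Synthetic PSD rows generated from the model itself: w * (1 + (fknee / f) ** alpha)
    fknee, w, alpha = 10., 2., 0.7
    pxx = np.tile(w * (1. + (fknee / f) ** alpha), (ndets, 1)).astype("float32")

    params = np.zeros((ndets, nparams), dtype="float32")   # best-fit values, filled in place
    sigmas = np.zeros((ndets, nparams), dtype="float32")   # 1-sigma uncertainties, filled in place

    so3g.fit_noise(f, pxx, params, sigmas,
                   1.0,            # lowf: frequencies below this seed the 1/f index and fknee guess
                   10.0, 100.0,    # fwhite_lower / fwhite_upper: band for the white-noise estimate
                   1e-8,           # tol: function tolerance for the BFGS minimization
                   200 * nparams,  # niter: maximum iterations
                   1e-5)           # epsilon: step for the finite-difference Hessian (uncertainties)

Each row of pxx is fit independently (the detector loop is OpenMP-parallel), so params[i] and sigmas[i] correspond to pxx[i]; rows where the seed fit or the minimization fails are returned as NaN in both arrays.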