From 5daae2bc3d47707ae3504f4c55f1fa763e90fbdf Mon Sep 17 00:00:00 2001
From: Sangtae Kim
Date: Mon, 25 Jun 2018 09:15:45 -0700
Subject: [PATCH] Update source formatting for v2.9.5 release

---
 docs/userGuide/errorAnalysis.md               |  4 ++--
 .../IndelModelProduction.cpp                  |  2 +-
 .../SequenceAlleleCountsPosProcessor.cpp      |  6 ++---
 src/c++/lib/blt_util/test/logSumUtilTest.cpp  |  8 +++----
 src/c++/lib/errorAnalysis/BasecallCounts.hh   | 24 +++++++++----------
 src/c++/lib/errorAnalysis/IndelCounts.hh      |  4 ++--
 .../germline/bin/evs_learn.py                 |  4 ++--
 .../somatic/bin/evs_learn.py                  |  2 +-
 8 files changed, 27 insertions(+), 27 deletions(-)

diff --git a/docs/userGuide/errorAnalysis.md b/docs/userGuide/errorAnalysis.md
index 68b1549f..0abdd81d 100644
--- a/docs/userGuide/errorAnalysis.md
+++ b/docs/userGuide/errorAnalysis.md
@@ -12,7 +12,7 @@ Error Pattern Analyzer User Guide
 * [Outputs](#outputs)
   * [Counts files](#counts-files)
   * [Error model output](#error-model-output)
-* [Error counting workflow configuration and execution](#error-counting-workflow-configuration-and-execution)
+* [Allele counting workflow configuration and execution](#allele-counting-workflow-configuration-and-execution)
   * [Configuration](#configuration)
     * [Configuration: Excluding regions](#configuration-excluding-regions)
     * [Configuration: Annotating known variants](#configuration-annotating-known-variants)
@@ -20,7 +20,7 @@ Error Pattern Analyzer User Guide
   * [Execution](#execution)
     * [Advanced execution options](#advanced-execution-options)
       * [`--quiet`](#--quiet)
-* [Viewing error counting workflow ouput](#viewing-error-counting-workflow-ouput)
+* [Viewing allele counting workflow ouput](#viewing-allele-counting-workflow-ouput)
   * [Summary output](#summary-output)
   * [Excluding basecalls/indels](#excluding-basecallsindels)
   * [Extended output (for model development)](#extended-output-for-model-development)
diff --git a/src/c++/lib/applications/EstimateVariantErrorRates/IndelModelProduction.cpp b/src/c++/lib/applications/EstimateVariantErrorRates/IndelModelProduction.cpp
index 37ac8b5d..819aa9c7 100644
--- a/src/c++/lib/applications/EstimateVariantErrorRates/IndelModelProduction.cpp
+++ b/src/c++/lib/applications/EstimateVariantErrorRates/IndelModelProduction.cpp
@@ -557,7 +557,7 @@ exportModelUsingInputJson(
     static const unsigned expectedJsonModelCount(1);
     const unsigned jsonModelCount(indelErrorModelsJson.getIndelErrorModels().size());
 
-    if(jsonModelCount != expectedJsonModelCount)
+    if (jsonModelCount != expectedJsonModelCount)
     {
         std::ostringstream oss;
         oss << "Expecting indel error model file to describe exactly " << expectedJsonModelCount
diff --git a/src/c++/lib/applications/GetSequenceAlleleCounts/SequenceAlleleCountsPosProcessor.cpp b/src/c++/lib/applications/GetSequenceAlleleCounts/SequenceAlleleCountsPosProcessor.cpp
index 16e2962c..ed0629b9 100644
--- a/src/c++/lib/applications/GetSequenceAlleleCounts/SequenceAlleleCountsPosProcessor.cpp
+++ b/src/c++/lib/applications/GetSequenceAlleleCounts/SequenceAlleleCountsPosProcessor.cpp
@@ -163,7 +163,7 @@ resetRegion(
     if (_maxNormalSampleDepthForCandidateVariants > 0.)
     {
         _maxNormalSampleDepthForCandidateVariants = std::min(_maxNormalSampleDepthForCandidateVariants,
-            static_cast<double>(_opt.max_candidate_indel_depth));
+                static_cast<double>(_opt.max_candidate_indel_depth));
     }
     else
     {
@@ -311,7 +311,7 @@ mergeIndelObservations(
     const IndelCounts::Context& context,
     const IndelCounts::SingleSampleCandidateVariantContextObservationPattern& indelObservation,
     std::map<unsigned,
-    IndelCounts::SingleSampleCandidateVariantContextObservationPattern>& mergedIndelObservations)
+        IndelCounts::SingleSampleCandidateVariantContextObservationPattern>& mergedIndelObservations)
 {
     using namespace IndelCounts;
 
@@ -673,7 +673,7 @@ process_pos_error_counts(
         if (referenceSTRContext.STRRepeatCount == std::min(maxSTRRepeatCount, indelReportInfo.refRepeatCount))
         {
             context = IndelCounts::Context(
-                    referenceSTRContext.patternSize, referenceSTRContext.STRRepeatCount);
+                referenceSTRContext.patternSize, referenceSTRContext.STRRepeatCount);
         }
     }
 
diff --git a/src/c++/lib/blt_util/test/logSumUtilTest.cpp b/src/c++/lib/blt_util/test/logSumUtilTest.cpp
index 9146ac26..e01f590d 100644
--- a/src/c++/lib/blt_util/test/logSumUtilTest.cpp
+++ b/src/c++/lib/blt_util/test/logSumUtilTest.cpp
@@ -149,7 +149,7 @@ BOOST_AUTO_TEST_CASE( benchmarkLogSums )
         TimeTracker tt;
         tt.resume();
         double sum(0);
-        for (unsigned i(0); i minValue; value *= valueFactor)
             {
@@ -164,7 +164,7 @@ BOOST_AUTO_TEST_CASE( benchmarkLogSums )
         TimeTracker tt;
         tt.resume();
         double sum(0);
-        for (unsigned i(0); i minValue; value *= valueFactor)
             {
@@ -180,7 +180,7 @@ BOOST_AUTO_TEST_CASE( benchmarkLogSums )
         TimeTracker tt;
         tt.resume();
         float sum(0);
-        for (unsigned i(0); i minValue; value *= valueFactor)
             {
@@ -195,7 +195,7 @@ BOOST_AUTO_TEST_CASE( benchmarkLogSums )
         TimeTracker tt;
         tt.resume();
         float sum(0);
-        for (unsigned i(0); i minValue; value *= valueFactor)
             {
diff --git a/src/c++/lib/errorAnalysis/BasecallCounts.hh b/src/c++/lib/errorAnalysis/BasecallCounts.hh
index 049b1d02..0b516f8c 100644
--- a/src/c++/lib/errorAnalysis/BasecallCounts.hh
+++ b/src/c++/lib/errorAnalysis/BasecallCounts.hh
@@ -56,7 +56,7 @@ struct Context
         Archive& ar,
         const unsigned /* version */)
     {
-        ar & repeatCount;
+        ar& repeatCount;
     }
 
     unsigned repeatCount = 1;
@@ -146,10 +146,10 @@ struct SingleSampleContextObservationPattern
         Archive& ar,
         const unsigned /* version */)
     {
-        ar & strand0.refAlleleCount;
-        ar & strand0.altAlleleCount;
-        ar & strand1.refAlleleCount;
-        ar & strand1.altAlleleCount;
+        ar& strand0.refAlleleCount;
+        ar& strand0.altAlleleCount;
+        ar& strand1.refAlleleCount;
+        ar& strand1.altAlleleCount;
     }
 
@@ -344,12 +344,12 @@ struct ContextData
     {
         // adding error.data instead of error here to reduce the total
         // serialization template depth:
-        ar & counts.data;
-        ar & counts.refAlleleBasecallErrorPhredProbs;
-        ar & excludedRegionSkipped;
-        ar & depthSkipped;
-        ar & emptySkipped;
-        ar & noiseSkipped;
+        ar& counts.data;
+        ar& counts.refAlleleBasecallErrorPhredProbs;
+        ar& excludedRegionSkipped;
+        ar& depthSkipped;
+        ar& emptySkipped;
+        ar& noiseSkipped;
     }
 
     SingleSampleContextData counts;
@@ -423,7 +423,7 @@ public:
         Archive& ar,
         const unsigned /* version */)
     {
-        ar & _data;
+        ar& _data;
     }
 
 private:
diff --git a/src/c++/lib/errorAnalysis/IndelCounts.hh b/src/c++/lib/errorAnalysis/IndelCounts.hh
index b01f1307..b8a1fc94 100644
--- a/src/c++/lib/errorAnalysis/IndelCounts.hh
+++ b/src/c++/lib/errorAnalysis/IndelCounts.hh
@@ -158,8 +158,8 @@ struct Context
         Archive& ar,
         const unsigned /* version */)
     {
-        ar & repeatPatternSize;
-        ar & repeatCount;
+        ar& repeatPatternSize;
+        ar& repeatCount;
     }
 
     unsigned getRepeatPatternSize() const
diff --git a/src/python/scoringModelTraining/germline/bin/evs_learn.py b/src/python/scoringModelTraining/germline/bin/evs_learn.py
index 0dcff2d8..4357f1db 100755
--- a/src/python/scoringModelTraining/germline/bin/evs_learn.py
+++ b/src/python/scoringModelTraining/germline/bin/evs_learn.py
@@ -112,7 +112,7 @@ def getDataSet(inputs, args) :
         print "Reading '%s'" % (inputFile)
         df = pandas.read_csv(inputFile, na_values=".")
         df.fillna("0", inplace=True)
-        # Remove false negatives before any subsampling:
+        # Remove false negatives before any subsampling:
         df = df[df["tag"] != "FN"]
 
         if args.sample_input:
@@ -129,7 +129,7 @@ def getDataSet(inputs, args) :
     else:
         print "TP: %d FP: %d" % (tps.shape[0], fps.shape[0])
         if tps.shape[0] < fps.shape[0]:
-            rows_selected = random.sample(fps.index, tps.shape[0])
+            rows_selected = random.sample(fps.index, tps.shape[0])
             fps = pandas.DataFrame(fps.ix[rows_selected])
         elif fps.shape[0] < tps.shape[0]:
             rows_selected = random.sample(tps.index, fps.shape[0])
diff --git a/src/python/scoringModelTraining/somatic/bin/evs_learn.py b/src/python/scoringModelTraining/somatic/bin/evs_learn.py
index cee9475a..dffa0894 100755
--- a/src/python/scoringModelTraining/somatic/bin/evs_learn.py
+++ b/src/python/scoringModelTraining/somatic/bin/evs_learn.py
@@ -128,7 +128,7 @@ def getDataSet(inputs, sample_input, balance_per_sample) :
             rows_selected = random.sample(fps.index, tps.shape[0])
            fps = pandas.DataFrame(fps.ix[rows_selected])
         elif fps.shape[0] < tps.shape[0]:
-            rows_selected = random.sample(tps.index, fps.shape[0])
+            rows_selected = random.sample(tps.index, fps.shape[0])
             tps = pandas.DataFrame(tps.ix[rows_selected])
         print "Downsampled to TP: %d FP: %d" % (tps.shape[0], fps.shape[0])
     df = pandas.concat([tps, fps])