From 4d344c554802c7b577c39a13f722cc571a0101fa Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jon=20Haitz=20Legarreta=20Gorro=C3=B1o?=
Date: Sun, 22 Oct 2023 12:49:44 -0400
Subject: [PATCH] STYLE: Prefer obtaining the module filename automatically
 from Python

Prefer obtaining the module filename automatically from Python: fixes
messages where the filename is incorrect.

These occurrences were left behind in commit becfe63.
---
 ...wm_quality_control_cluster_measurements.py |  2 +-
 bin/wm_register_multisubject_faster.py        |  2 +-
 whitematteranalysis/cluster.py                | 30 +++++++++----------
 whitematteranalysis/filter.py                 |  6 ++--
 whitematteranalysis/io.py                     |  8 ++---
 5 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/bin/wm_quality_control_cluster_measurements.py b/bin/wm_quality_control_cluster_measurements.py
index 6160dd63..e5bee8d9 100755
--- a/bin/wm_quality_control_cluster_measurements.py
+++ b/bin/wm_quality_control_cluster_measurements.py
@@ -47,7 +47,7 @@ def main():
     ## output_dir = args.outputDirectory
     ## if not os.path.exists(output_dir):
-    ##     print " Output directory", output_dir, "does not exist, creating it."
+    ##     print(f"<{os.path.basename(__file__)}> Output directory", output_dir, "does not exist, creating it.")
     ##     os.makedirs(output_dir)

     measurement_list = wma.tract_measurement.load_measurement_in_folder(args.inputDirectory, hierarchy = 'Column', separator = 'Tab')
diff --git a/bin/wm_register_multisubject_faster.py b/bin/wm_register_multisubject_faster.py
index 32ad7898..8b7f0763 100755
--- a/bin/wm_register_multisubject_faster.py
+++ b/bin/wm_register_multisubject_faster.py
@@ -106,7 +106,7 @@ def main():

     #points_per_fiber = args.pointsPerFiber
-    #print " Number of points for fiber representation: ", points_per_fiber
+    #print(f"<{os.path.basename(__file__)}> Number of points for fiber representation: ", points_per_fiber)

     if args.flag_norender:
         print(f"<{os.path.basename(__file__)}> No rendering (for compute servers without X connection).")
diff --git a/whitematteranalysis/cluster.py b/whitematteranalysis/cluster.py
index 3437727e..05f0e7e6 100644
--- a/whitematteranalysis/cluster.py
+++ b/whitematteranalysis/cluster.py
@@ -93,7 +93,7 @@ def save(self, directory, atlas_name):
     def load(self, directory, atlas_name, verbose=False):
         if not os.path.isdir(directory):
             print("Error: Atlas directory", directory, "does not exist or is not a directory.")
-            raise " I/O error"
+            raise IOError(f"<{os.path.basename(__file__)}> I/O error")

         fname_base = os.path.join(directory,atlas_name)
         fname_atlas = fname_base+'.p'
@@ -101,10 +101,10 @@ def load(self, directory, atlas_name, verbose=False):

         if not os.path.exists(fname_atlas):
             print("Error: Atlas file", fname_atlas, "does not exist.")
-            raise " I/O error"
+            raise IOError(f"<{os.path.basename(__file__)}> I/O error")
         if not os.path.exists(fname_polydata):
             print("Error: Atlas file", fname_polydata, "does not exist.")
-            raise " I/O error"
+            raise IOError(f"<{os.path.basename(__file__)}> I/O error")

         try:
             atlas = pickle.load(open(fname_atlas,'rb'))
@@ -333,21 +333,21 @@ def spectral(input_polydata, number_of_clusters=200,
     # C is not computed.
     # Calculate the sum of the partial rows we've computed:
     atlas.row_sum_1 = np.sum(A, axis=0) + np.sum(B.T, axis=0)
-    #print " A size:", A.shape
-    #print " B size:", B.shape
-    #print " A-B matrix row sums range (should be > 0):", np.min(atlas.row_sum_1), np.max(atlas.row_sum_1)
-
+    #print(f"<{os.path.basename(__file__)}> A size:", A.shape)
+    #print(f"<{os.path.basename(__file__)}> B size:", B.shape)
+    #print(f"<{os.path.basename(__file__)}> A-B matrix row sums range (should be > 0):", np.min(atlas.row_sum_1), np.max(atlas.row_sum_1))
+
     # Approximate the sum of the rest of the data (including C)
     # These are weighted sums of the columns we did compute
     # where the weight depends on how similar that fiber
     # was to each path in A. This uses the dual basis
     # of the columns in A.
     # Approximate the inverse of A for dual basis
-    #print " Using numpy linalg pinv A"
+    #print(f"<{os.path.basename(__file__)}> Using numpy linalg pinv A")
     atlas.pinv_A = np.linalg.pinv(A)
     #e_val, e_vec = np.linalg.eigh(atlas.pinv_A)
-    #print " test of non-normalized A pseudoinverse Eigenvalue range:", e_val[0], e_val[-1]
+    #print(f"<{os.path.basename(__file__)}> test of non-normalized A pseudoinverse Eigenvalue range:", e_val[0], e_val[-1])

     # row sum formula:
     # dhat = [a_r + b_r; b_c + B^T*A-1*b_r]
@@ -355,11 +355,11 @@ def spectral(input_polydata, number_of_clusters=200,
     # matlab was: atlas.approxRowSumMatrix = sum(B',1)*atlas.pseudoInverseA;
     atlas.row_sum_matrix = np.dot(np.sum(B.T, axis=0), atlas.pinv_A)
     #test = np.sum(B.T, axis=0)
-    #print " B column sums range (should be > 0):", np.min(test), np.max(test)
+    #print(f"<{os.path.basename(__file__)}> B column sums range (should be > 0):", np.min(test), np.max(test))
     print(f"<{os.path.basename(__file__)}> Range of row sum weights:", np.min(atlas.row_sum_matrix), np.max(atlas.row_sum_matrix))
-    #print " First 10 entries in weight matrix:", atlas.row_sum_matrix[0:10]
+    #print(f"<{os.path.basename(__file__)}> First 10 entries in weight matrix:", atlas.row_sum_matrix[0:10])
     #test = np.dot(atlas.row_sum_matrix, B)
-    #print " Test partial sum estimation for B:", np.min(test), np.max(test)
+    #print(f"<{os.path.basename(__file__)}> Test partial sum estimation for B:", np.min(test), np.max(test))
     #del test

     # row sum estimate for current B part of the matrix
@@ -503,7 +503,7 @@ def spectral(input_polydata, number_of_clusters=200,
             centroid_order = render.argsort_by_jet_lookup_table(color)
             atlas.centroids = centroids[centroid_order,:]
             cluster_idx, dist = scipy.cluster.vq.vq(embed, atlas.centroids)
-            #print " Distortion metric:", cluster_metric
+            #print(f"<{os.path.basename(__file__)}> Distortion metric:", cluster_metric)
             if 0:
                 # This is extremely slow, but leave code here if ever wanted for testing
                 cluster_metric = metrics.silhouette_score(embed, cluster_idx, metric='sqeuclidean')
@@ -513,7 +513,7 @@ def spectral(input_polydata, number_of_clusters=200,
         print("ERROR: Unknown centroid finder", centroid_finder)
         ## # This found fewer clusters than we need to represent the anatomy well
         ## # Leave code here in case wanted in future for more testing.
-        ## print ' Affinity Propagation clustering in embedding space.'
+        ## print(f'<{os.path.basename(__file__)}> Affinity Propagation clustering in embedding space.')
         ## af = AffinityPropagation(preference=-50).fit(embed)
         ## cluster_centers_indices = af.cluster_centers_indices_
         ## labels = af.labels_
@@ -1041,7 +1041,7 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f
         else:
             cluster_colors.append([0,0,0])
         del pd_c
-    print("\n Finished saving individual clusters as polydata files.")
+    print(f"\n<{os.path.basename(__file__)}> Finished saving individual clusters as polydata files.")

     # Notify user if some clusters empty
     empty_count = 0
diff --git a/whitematteranalysis/filter.py b/whitematteranalysis/filter.py
index e9e6ece0..f0f67ad8 100644
--- a/whitematteranalysis/filter.py
+++ b/whitematteranalysis/filter.py
@@ -287,7 +287,7 @@ def downsample(inpd, output_number_of_lines, return_indices=False, preserve_poin
     outpd = mask(inpd, fiber_mask, preserve_point_data=preserve_point_data, preserve_cell_data=preserve_cell_data, verbose=verbose)

     # final line count
-    #print " Number of lines selected:", outpd.GetNumberOfLines()
+    #print(f"<{os.path.basename(__file__)}> Number of lines selected:", outpd.GetNumberOfLines())
     if return_indices:
         # return sorted indices, this is the line ordering of output
         # polydata (because we mask rather than changing input line order)
@@ -1134,7 +1134,7 @@ def pd_to_array(inpd, dims=225):
         # progress
         #if verbose:
         #    if lidx % 1 == 0:
-        #        print " Line:", lidx, "/", inpd.GetNumberOfLines()
+        #        print(f"<{os.path.basename(__file__)}> Line:", lidx, "/", inpd.GetNumberOfLines())
         inpd.GetLines().GetNextCell(ptids)
         num_points = ptids.GetNumberOfIds()
         for pidx in range(0, num_points):
@@ -1176,7 +1176,7 @@ def measure_line_lengths(inpd):
         # progress
         #if verbose:
         #    if lidx % 1 == 0:
-        #        print " Line:", lidx, "/", inpd.GetNumberOfLines()
+        #        print(f"<{os.path.basename(__file__)}> Line:", lidx, "/", inpd.GetNumberOfLines())
         inpd.GetLines().GetNextCell(ptids)
         output_lengths[lidx] = ptids.GetNumberOfIds()
     return(output_lengths)
diff --git a/whitematteranalysis/io.py b/whitematteranalysis/io.py
index da4df42c..23b92002 100644
--- a/whitematteranalysis/io.py
+++ b/whitematteranalysis/io.py
@@ -349,7 +349,7 @@ def transform_polydatas_from_diskUNSAFE(input_dir, transforms, output_dir, paral
         os.remove(fname)

     #for idx in range(0, len(input_pd_fnames)):
-    #print " ", idx + 1, "/", num_pd, subject_id, " Transforming ", in_filename, "->", out_filename, "..."
+    #print(f"<{os.path.basename(__file__)}> ", idx + 1, "/", num_pd, subject_id, " Transforming ", in_filename, "->", out_filename, "...")
     #transform_polydata_from_disk(in_filename, transform, out_filename)

 def transform_polydatas_from_diskOLD(input_dir, transforms, output_dir):
@@ -707,7 +707,7 @@ def write(self, dirname, savedist=False):
         ##     matplotlib.pyplot.close()
         ##     print "g"
         ## except Exception:
-        ##     print " matplotlib was unable to write histogram."
+        ##     print(f"<{os.path.basename(__file__)}> matplotlib was unable to write histogram.")
         ##     raise
         ## print "1"
         # generate fiber visualization
@@ -716,8 +716,8 @@ def write(self, dirname, savedist=False):
         #ren = render.render(self.polydata)
         #ren.save_views(dirname)
         #except Exception:
-        #    print " vtk or rendering issue. Failed to save views."
-        #    print " polydata was saved to disk so you can re-render."
+        #    print(f"<{os.path.basename(__file__)}> vtk or rendering issue. Failed to save views.")
+        #    print(f"<{os.path.basename(__file__)}> polydata was saved to disk so you can re-render.")
         #    raise

     #print "IMPLEMENT SAVING OF PARAMETERS TOO"
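
Note on the pattern this patch applies throughout: deriving the displayed
program name at runtime via os.path.basename(__file__) keeps console
messages correct if a script is renamed or copied, rather than relying on a
hard-coded name. A minimal self-contained sketch of the idiom follows; the
script body and message text are illustrative only, not code from this
repository:

    #!/usr/bin/env python
    import os

    def main():
        # The prefix is computed from this file's own path, so it always
        # names the module that actually printed the message.
        print(f"<{os.path.basename(__file__)}> Starting processing.")

    if __name__ == "__main__":
        main()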