STYLE: Prefer obtaining the cluster filename automatically from Python
Prefer obtaining the module filenames automatically from Python: fixes
messages where the filename is incorrect.

Left behind in commit becfe63.
jhlegarreta committed Nov 30, 2023
1 parent 24528f4 commit 4d344c5
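
For context, the pattern this commit standardizes on looks like the following; a minimal sketch, where wm_example.py is a hypothetical script name rather than a file in this repository:

import os

# The message prefix is derived from the module's own filename, so a
# rename can no longer leave a stale name (such as "<register>") behind:
print(f"<{os.path.basename(__file__)}> Output directory does not exist, creating it.")
# Saved as wm_example.py, this prints:
# <wm_example.py> Output directory does not exist, creating it.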
Showing 5 changed files with 24 additions and 24 deletions.
bin/wm_quality_control_cluster_measurements.py (2 changes: 1 addition & 1 deletion)
@@ -47,7 +47,7 @@ def main():

## output_dir = args.outputDirectory
## if not os.path.exists(output_dir):
## print "<register> Output directory", output_dir, "does not exist, creating it."
## print f"{os.path.basename(__file__)} Output directory", output_dir, "does not exist, creating it."
## os.makedirs(output_dir)

measurement_list = wma.tract_measurement.load_measurement_in_folder(args.inputDirectory, hierarchy = 'Column', separator = 'Tab')
bin/wm_register_multisubject_faster.py (2 changes: 1 addition & 1 deletion)
@@ -106,7 +106,7 @@ def main():


#points_per_fiber = args.pointsPerFiber
#print "<register> Number of points for fiber representation: ", points_per_fiber
#print f"<{os.path.basename(__file__)}> Number of points for fiber representation: ", points_per_fiber

if args.flag_norender:
print(f"<{os.path.basename(__file__)}> No rendering (for compute servers without X connection).")
whitematteranalysis/cluster.py (30 changes: 15 additions & 15 deletions)
@@ -93,18 +93,18 @@ def save(self, directory, atlas_name):
def load(self, directory, atlas_name, verbose=False):
if not os.path.isdir(directory):
print("Error: Atlas directory", directory, "does not exist or is not a directory.")
raise "<cluster.py> I/O error"
raise f"{os.path.basename(__file__)} I/O error"

fname_base = os.path.join(directory,atlas_name)
fname_atlas = fname_base+'.p'
fname_polydata = fname_base+'.vtp'

if not os.path.exists(fname_atlas):
print("Error: Atlas file", fname_atlas, "does not exist.")
raise "<cluster.py> I/O error"
raise f"{os.path.basename(__file__)} I/O error"
if not os.path.exists(fname_polydata):
print("Error: Atlas file", fname_polydata, "does not exist.")
raise "<cluster.py> I/O error"
raise f"{os.path.basename(__file__)} I/O error"

try:
atlas = pickle.load(open(fname_atlas,'rb'))
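
One caveat with the hunk above: both the removed and the added lines raise a bare string, which Python 3 rejects at runtime with TypeError: exceptions must derive from BaseException. A raisable equivalent might look like the sketch below; the choice of OSError is an assumption, not part of this commit:

import os

fname_atlas = "atlas.p"  # hypothetical path, for illustration only
if not os.path.exists(fname_atlas):
    # A string cannot be raised in Python 3; wrap the message in an
    # exception class so the raise actually succeeds:
    raise OSError(f"<{os.path.basename(__file__)}> I/O error: {fname_atlas} does not exist")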
@@ -333,33 +333,33 @@ def spectral(input_polydata, number_of_clusters=200,
# C is not computed.
# Calculate the sum of the partial rows we've computed:
atlas.row_sum_1 = np.sum(A, axis=0) + np.sum(B.T, axis=0)
#print "<cluster.py> A size:", A.shape
#print "<cluster.py> B size:", B.shape
#print "<cluster.py> A-B matrix row sums range (should be > 0):", np.min(atlas.row_sum_1), np.max(atlas.row_sum_1)
#print f"<{os.path.basename(__file__)}> A size:", A.shape
#print f"<{os.path.basename(__file__)}> B size:", B.shape
#print f"<{os.path.basename(__file__)}> A-B matrix row sums range (should be > 0):", np.min(atlas.row_sum_1), np.max(atlas.row_sum_1)

# Approximate the sum of the rest of the data (including C)
# These are weighted sums of the columns we did compute
# where the weight depends on how similar that fiber
# was to each path in A. This uses the dual basis
# of the columns in A.
# Approximate the inverse of A for dual basis
#print "<cluster.py> Using numpy linalg pinv A"
#print f"<{os.path.basename(__file__)}> Using numpy linalg pinv A"
atlas.pinv_A = np.linalg.pinv(A)

#e_val, e_vec = np.linalg.eigh(atlas.pinv_A)
#print "<cluster.py> test of non-normalized A pseudoinverse Eigenvalue range:", e_val[0], e_val[-1]
#print f"<{os.path.basename(__file__)}> test of non-normalized A pseudoinverse Eigenvalue range:", e_val[0], e_val[-1]

# row sum formula:
# dhat = [a_r + b_r; b_c + B^T*A-1*b_r]
# this matrix is A^-1 * b_r, where b_r are the row sums of B
# matlab was: atlas.approxRowSumMatrix = sum(B',1)*atlas.pseudoInverseA;
atlas.row_sum_matrix = np.dot(np.sum(B.T, axis=0), atlas.pinv_A)
#test = np.sum(B.T, axis=0)
#print "<cluster.py> B column sums range (should be > 0):", np.min(test), np.max(test)
#print f"<{os.path.basename(__file__)}> B column sums range (should be > 0):", np.min(test), np.max(test)
print(f"<{os.path.basename(__file__)}> Range of row sum weights:", np.min(atlas.row_sum_matrix), np.max(atlas.row_sum_matrix))
#print "<cluster.py> First 10 entries in weight matrix:", atlas.row_sum_matrix[0:10]
#print f"<{os.path.basename(__file__)}> First 10 entries in weight matrix:", atlas.row_sum_matrix[0:10]
#test = np.dot(atlas.row_sum_matrix, B)
#print "<cluster.py> Test partial sum estimation for B:", np.min(test), np.max(test)
#print f"<{os.path.basename(__file__)}> Test partial sum estimation for B:", np.min(test), np.max(test)
#del test

# row sum estimate for current B part of the matrix
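
For readers of the hunk above: this is the Nyström row-sum estimate for an affinity matrix W = [[A, B], [B.T, C]] in which only the sampled blocks A and B are computed. Rows covered by A are summed exactly; the remaining rows are approximated through the pseudoinverse of A (its dual basis), matching the commented formula dhat = [a_r + b_r; b_c + B^T*A^-1*b_r]. A self-contained sketch with toy random matrices; shapes and names are illustrative, not from this commit:

import numpy as np

rng = np.random.default_rng(0)
n_sample, n_rest = 5, 20
A = rng.random((n_sample, n_sample))
A = A @ A.T                               # symmetric sampled block
B = rng.random((n_sample, n_rest))        # sampled-to-rest block; C is never formed

# Exact sums for the sampled rows: contributions from A plus from B.T.
row_sum_1 = np.sum(A, axis=0) + np.sum(B.T, axis=0)

# Weights for the remaining rows: the row sums of B projected through
# the dual basis of A (its pseudoinverse).
pinv_A = np.linalg.pinv(A)
row_sum_matrix = np.dot(np.sum(B.T, axis=0), pinv_A)

# Approximate sums for the remaining rows: b_c + B^T * A^-1 * b_r.
row_sum_2 = np.sum(B, axis=0) + np.dot(row_sum_matrix, B)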
@@ -503,7 +503,7 @@ def spectral(input_polydata, number_of_clusters=200,
centroid_order = render.argsort_by_jet_lookup_table(color)
atlas.centroids = centroids[centroid_order,:]
cluster_idx, dist = scipy.cluster.vq.vq(embed, atlas.centroids)
#print "<cluster.py> Distortion metric:", cluster_metric
#print f"<{os.path.basename(__file__)}> Distortion metric:", cluster_metric
if 0:
# This is extremely slow, but leave code here if ever wanted for testing
cluster_metric = metrics.silhouette_score(embed, cluster_idx, metric='sqeuclidean')
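
The assignment step in this hunk relies on scipy.cluster.vq.vq, which maps each embedding row to its nearest centroid and returns one index and one distance per observation. A toy sketch of that call; the data are made up for illustration:

import numpy as np
import scipy.cluster.vq

embed = np.array([[0.0, 0.0], [1.0, 1.0], [0.9, 1.1]])
centroids = np.array([[0.0, 0.0], [1.0, 1.0]])

# cluster_idx holds the nearest-centroid index for each row of embed;
# dist holds the Euclidean distance to that centroid.
cluster_idx, dist = scipy.cluster.vq.vq(embed, centroids)
# cluster_idx -> array([0, 1, 1])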
@@ -513,7 +513,7 @@ def spectral(input_polydata, number_of_clusters=200,
print("ERROR: Unknown centroid finder", centroid_finder)
## # This found fewer clusters than we need to represent the anatomy well
## # Leave code here in case wanted in future for more testing.
-## print '<cluster.py> Affinity Propagation clustering in embedding space.'
+## print f'<{os.path.basename(__file__)}> Affinity Propagation clustering in embedding space.'
## af = AffinityPropagation(preference=-50).fit(embed)
## cluster_centers_indices = af.cluster_centers_indices_
## labels = af.labels_
@@ -1041,7 +1041,7 @@ def output_and_quality_control_cluster_atlas(atlas, output_polydata_s, subject_f
else:
cluster_colors.append([0,0,0])
del pd_c
print("\n<cluster.py> Finished saving individual clusters as polydata files.")
print(f"\n<{os.path.basename(__file__)}> Finished saving individual clusters as polydata files.")

# Notify user if some clusters empty
empty_count = 0
whitematteranalysis/filter.py (6 changes: 3 additions & 3 deletions)
@@ -287,7 +287,7 @@ def downsample(inpd, output_number_of_lines, return_indices=False, preserve_poin
outpd = mask(inpd, fiber_mask, preserve_point_data=preserve_point_data, preserve_cell_data=preserve_cell_data, verbose=verbose)

# final line count
#print "<filter.py> Number of lines selected:", outpd.GetNumberOfLines()
#print f"<{os.path.basename(__file__)}> Number of lines selected:", outpd.GetNumberOfLines()
if return_indices:
# return sorted indices, this is the line ordering of output
# polydata (because we mask rather than changing input line order)
@@ -1134,7 +1134,7 @@ def pd_to_array(inpd, dims=225):
# progress
#if verbose:
# if lidx % 1 == 0:
# print "<filter.py> Line:", lidx, "/", inpd.GetNumberOfLines()
# print f"<{os.path.basename(__file__)}> Line:", lidx, "/", inpd.GetNumberOfLines()
inpd.GetLines().GetNextCell(ptids)
num_points = ptids.GetNumberOfIds()
for pidx in range(0, num_points):
@@ -1176,7 +1176,7 @@ def measure_line_lengths(inpd):
# progress
#if verbose:
# if lidx % 1 == 0:
# print "<filter.py> Line:", lidx, "/", inpd.GetNumberOfLines()
# print f"<{os.path.basename(__file__)}> Line:", lidx, "/", inpd.GetNumberOfLines()
inpd.GetLines().GetNextCell(ptids)
output_lengths[lidx] = ptids.GetNumberOfIds()
return(output_lengths)
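
The measure_line_lengths hunk uses VTK's classic cell traversal: InitTraversal() resets the line iterator (above the fold here), and each GetNextCell(ptids) fills an id list whose length is the point count of that polyline. A self-contained toy sketch of the same pattern; the two-polyline geometry is invented for illustration:

import vtk

# Build a toy polydata with two polylines of 3 and 2 points.
points = vtk.vtkPoints()
for i in range(5):
    points.InsertNextPoint(float(i), 0.0, 0.0)
lines = vtk.vtkCellArray()
for ids in ([0, 1, 2], [3, 4]):
    lines.InsertNextCell(len(ids))
    for i in ids:
        lines.InsertCellPoint(i)
pd = vtk.vtkPolyData()
pd.SetPoints(points)
pd.SetLines(lines)

# Same traversal pattern as measure_line_lengths above.
ptids = vtk.vtkIdList()
pd.GetLines().InitTraversal()
lengths = []
for _ in range(pd.GetNumberOfLines()):
    pd.GetLines().GetNextCell(ptids)
    lengths.append(ptids.GetNumberOfIds())
# lengths -> [3, 2]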
whitematteranalysis/io.py (8 changes: 4 additions & 4 deletions)
@@ -349,7 +349,7 @@ def transform_polydatas_from_diskUNSAFE(input_dir, transforms, output_dir, paral
os.remove(fname)

#for idx in range(0, len(input_pd_fnames)):
#print "<io.py> ", idx + 1, "/", num_pd, subject_id, " Transforming ", in_filename, "->", out_filename, "..."
#print f"<{os.path.basename(__file__)}> ", idx + 1, "/", num_pd, subject_id, " Transforming ", in_filename, "->", out_filename, "..."
#transform_polydata_from_disk(in_filename, transform, out_filename)

def transform_polydatas_from_diskOLD(input_dir, transforms, output_dir):
@@ -707,7 +707,7 @@ def write(self, dirname, savedist=False):
## matplotlib.pyplot.close()
## print "g"
## except Exception:
## print "<io.py> matplotlib was unable to write histogram."
## print(f"<{os.path.basename(__file__)}> matplotlib was unable to write histogram.")
## raise
## print "1"
# generate fiber visualization
@@ -716,8 +716,8 @@
#ren = render.render(self.polydata)
#ren.save_views(dirname)
#except Exception:
# print "<io.py> vtk or rendering issue. Failed to save views."
# print "<io.py> polydata was saved to disk so you can re-render."
# print(f"<{os.path.basename(__file__)}> vtk or rendering issue. Failed to save views.")
# print(f"<{os.path.basename(__file__)}> polydata was saved to disk so you can re-render.")
# raise

#print "IMPLEMENT SAVING OF PARAMETERS TOO"
