Skip to content

Commit

Permalink
fix compiler warnings
Browse files Browse the repository at this point in the history
  • Loading branch information
Ahdhn committed Dec 13, 2024
1 parent f6b78d1 commit 5ae223f
Show file tree
Hide file tree
Showing 25 changed files with 212 additions and 193 deletions.
4 changes: 3 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,9 @@ set(cuda_flags
-Xcompiler=$<$<CXX_COMPILER_ID:MSVC>:${MSVC_XCOMPILER_FLAGS}>
#Disables warning
#177-D "function XXX was declared but never referenced"
-Xcudafe "--display_error_number --diag_suppress=177"
#174-D "expression has no effect"
#20054-D "dynamic initialization is not supported for a function-scope static __shared__ variable within a __device__/__global__ function"
-Xcudafe "--display_error_number --diag_suppress=177 --diag_suppress=174 --diag_suppress=20054"
-rdc=true
-lineinfo
--expt-extended-lambda
Expand Down
4 changes: 2 additions & 2 deletions include/rxmesh/attribute.h
Original file line number Diff line number Diff line change
Expand Up @@ -341,7 +341,7 @@ class Attribute : public AttributeBase
/**
* @brief return the amount of allocated memory in megabytes
*/
const double get_memory_mg() const
double get_memory_mg() const
{
return m_memory_mega_bytes;
}
Expand Down Expand Up @@ -412,7 +412,7 @@ class Attribute : public AttributeBase
#pragma omp parallel for
for (int p = 0; p < static_cast<int>(m_rxmesh->get_num_patches());
++p) {
for (int e = 0; e < capacity(p); ++e) {
for (uint32_t e = 0; e < capacity(p); ++e) {
m_h_attr[p][e] = value;
}
}
Expand Down
16 changes: 9 additions & 7 deletions include/rxmesh/context.h
Original file line number Diff line number Diff line change
Expand Up @@ -26,17 +26,21 @@ class Context
m_num_faces(nullptr),
m_num_vertices(nullptr),
m_num_patches(nullptr),
m_max_num_vertices(nullptr),
m_max_num_edges(nullptr),
m_max_num_faces(nullptr),
m_d_vertex_prefix(nullptr),
m_d_edge_prefix(nullptr),
m_d_face_prefix(nullptr),
m_h_vertex_prefix(nullptr),
m_h_edge_prefix(nullptr),
m_h_face_prefix(nullptr),
m_capacity_factor(0.0f),
m_patches_info(nullptr),
m_h_face_prefix(nullptr),
m_max_lp_capacity_v(0),
m_max_lp_capacity_e(0),
m_max_lp_capacity_f(0)
m_max_lp_capacity_f(0),
m_patches_info(nullptr),
m_capacity_factor(0.0f),
m_max_num_patches(0)
{
}

Expand Down Expand Up @@ -185,9 +189,7 @@ class Context
*/
template <typename HandleT>
__device__ __host__ __inline__ uint32_t linear_id(HandleT input) const
{
using LocalT = typename HandleT::LocalT;

{
assert(input.is_valid());

assert(input.patch_id() < m_num_patches[0]);
Expand Down
29 changes: 14 additions & 15 deletions include/rxmesh/hash_functions.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -52,11 +52,11 @@ struct universal_hash
}

universal_hash(const universal_hash&) = default;
__host__ __device__ universal_hash() : m_hash_x(0u), m_hash_y(0u){};
universal_hash(universal_hash&&) = default;
__host__ __device__ universal_hash() : m_hash_x(0u), m_hash_y(0u) {};
universal_hash(universal_hash&&) = default;
universal_hash& operator=(universal_hash const&) = default;
universal_hash& operator=(universal_hash&&) = default;
~universal_hash() = default;
universal_hash& operator=(universal_hash&&) = default;
~universal_hash() = default;

static constexpr uint32_t prime_divisor = 4294967291u;

Expand Down Expand Up @@ -105,14 +105,13 @@ struct MurmurHash3_32
{
}

MurmurHash3_32(const MurmurHash3_32&) = default;
MurmurHash3_32(MurmurHash3_32&&) = default;
MurmurHash3_32(const MurmurHash3_32&) = default;
MurmurHash3_32(MurmurHash3_32&&) = default;
MurmurHash3_32& operator=(MurmurHash3_32 const&) = default;
MurmurHash3_32& operator=(MurmurHash3_32&&) = default;
~MurmurHash3_32() = default;
MurmurHash3_32& operator=(MurmurHash3_32&&) = default;
~MurmurHash3_32() = default;

constexpr uint32_t __host__ __device__
operator()(Key const& key) const noexcept
uint32_t __host__ __device__ operator()(Key const& key) const noexcept
{
constexpr int len = sizeof(Key);
const uint8_t* const data = (const uint8_t*)&key;
Expand Down Expand Up @@ -179,12 +178,12 @@ struct MurmurHash3_32
// Taken from https://github.com/skeeto/hash-prospector
struct hash16_xm2
{
hash16_xm2() = default;
hash16_xm2(const hash16_xm2&) = default;
hash16_xm2(hash16_xm2&&) = default;
hash16_xm2() = default;
hash16_xm2(const hash16_xm2&) = default;
hash16_xm2(hash16_xm2&&) = default;
hash16_xm2& operator=(hash16_xm2 const&) = default;
hash16_xm2& operator=(hash16_xm2&&) = default;
~hash16_xm2() = default;
hash16_xm2& operator=(hash16_xm2&&) = default;
~hash16_xm2() = default;

constexpr uint16_t __host__ __device__
operator()(uint16_t key) const noexcept
Expand Down
2 changes: 1 addition & 1 deletion include/rxmesh/lp_hashtable.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -126,7 +126,7 @@ struct LPHashTable
if (m_is_on_device) {
CUDA_ERROR(cudaMemset(m_table, INVALID8, num_bytes()));
} else {
std::memset(m_table, INVALID8, num_bytes());
std::fill_n(m_table, m_capacity, LPPair());
}
}

Expand Down
22 changes: 11 additions & 11 deletions include/rxmesh/matrix/dense_matrix.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -213,7 +213,7 @@ struct DenseMatrix
* @brief accessing a specific value in the matrix using the row and col
* index. Can be used on both host and device
*/
__host__ __device__ T& operator()(const uint32_t row, const uint32_t col)
__host__ __device__ T& operator()(const IndexT row, const IndexT col)
{
assert(row < m_num_rows);
assert(col < m_num_cols);
Expand All @@ -229,8 +229,8 @@ struct DenseMatrix
* @brief accessing a specific value in the matrix using the row and col
* index. Can be used on both host and device
*/
__host__ __device__ const T& operator()(const uint32_t row,
const uint32_t col) const
__host__ __device__ const T& operator()(const IndexT row,
const IndexT col) const
{
assert(row < m_num_rows);
assert(col < m_num_cols);
Expand All @@ -247,7 +247,7 @@ struct DenseMatrix
* @brief access the matrix using vertex/edge/face handle as a row index.
*/
template <typename HandleT>
__host__ __device__ T& operator()(const HandleT handle, const uint32_t col)
__host__ __device__ T& operator()(const HandleT handle, const IndexT col)
{
return this->operator()(get_row_id(handle), col);
}
Expand All @@ -257,7 +257,7 @@ struct DenseMatrix
*/
template <typename HandleT>
__host__ __device__ const T& operator()(const HandleT handle,
const uint32_t col) const
const IndexT col) const
{
return this->operator()(get_row_id(handle), col);
}
Expand Down Expand Up @@ -620,11 +620,11 @@ struct DenseMatrix
* handle
*/
template <typename HandleT>
__host__ __device__ const uint32_t get_row_id(const HandleT handle) const
__host__ __device__ IndexT get_row_id(const HandleT handle) const
{
auto id = handle.unpack();

uint32_t row;
IndexT row;

if constexpr (std::is_same_v<HandleT, VertexHandle>) {
row = m_context.vertex_prefix()[id.first] + id.second;
Expand Down Expand Up @@ -662,7 +662,7 @@ struct DenseMatrix
/**
* @brief return the raw pointer of a column.
*/
__host__ const T* col_data(const uint32_t ld_idx,
__host__ const T* col_data(const IndexT ld_idx,
locationT location = DEVICE) const
{
if ((location & HOST) == HOST) {
Expand All @@ -685,7 +685,7 @@ struct DenseMatrix
/**
* @brief return the raw pointer of a column.
*/
__host__ T* col_data(const uint32_t ld_idx, locationT location = DEVICE)
__host__ T* col_data(const IndexT ld_idx, locationT location = DEVICE)
{
if ((location & HOST) == HOST) {
return m_h_val + ld_idx * m_num_rows;
Expand Down Expand Up @@ -893,8 +893,8 @@ struct DenseMatrix
/**
* @brief return the 1D index given the row and column id
*/
__host__ __device__ __inline__ int get_index(uint32_t row,
uint32_t col) const
__host__ __device__ __inline__ int get_index(IndexT row,
IndexT col) const
{
if constexpr (Order == Eigen::ColMajor) {
return col * m_num_rows + row;
Expand Down
27 changes: 13 additions & 14 deletions include/rxmesh/matrix/kmeans_patch.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -13,20 +13,19 @@ namespace rxmesh {
namespace detail {

template <uint32_t blockThreads>
__inline__ __device__ void bi_assignment_ggp(
cooperative_groups::thread_block& block,
const uint16_t num_vertices,
const Bitmask& s_owned_v,
const bool ignore_owned_v,
const Bitmask& s_active_v,
const uint16_t* m_s_vv_offset,
const uint16_t* m_s_vv,
Bitmask& s_assigned_v,
Bitmask& s_current_frontier_v,
Bitmask& s_next_frontier_v,
Bitmask& s_partition_a_v,
Bitmask& s_partition_b_v,
int num_iter);
__device__ void bi_assignment_ggp(cooperative_groups::thread_block& block,
const uint16_t num_vertices,
const Bitmask& s_owned_v,
const bool ignore_owned_v,
const Bitmask& s_active_v,
const uint16_t* m_s_vv_offset,
const uint16_t* m_s_vv,
Bitmask& s_assigned_v,
Bitmask& s_current_frontier_v,
Bitmask& s_next_frontier_v,
Bitmask& s_partition_a_v,
Bitmask& s_partition_b_v,
int num_iter);
}


Expand Down
24 changes: 12 additions & 12 deletions include/rxmesh/matrix/nd_permute.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -217,10 +217,10 @@ void heavy_max_matching(const RXMeshStatic& rx,
}
if (v_weight_sum != rx.get_num_vertices()) {
RXMESH_ERROR(
"Unexpected behavior in heavy_max_matching as the sum of the
" " patch graph's vertex weight ({}) does not match the number of
" " vertices in the mesh({})
.", v_weight_sum,
"Unexpected behavior in heavy_max_matching as the sum of the patch "
"graph's vertex weight ({}) does not match the number of vertices "
"in the mesh({}).",
v_weight_sum,
rx.get_num_vertices());
}

Expand Down Expand Up @@ -338,7 +338,7 @@ void heavy_max_matching(const RXMeshStatic& rx,
#endif


for (int i = 0; i < l.nodes.size(); ++i) {
for (size_t i = 0; i < l.nodes.size(); ++i) {
const auto& node = l.nodes[i];
// the neighbors to this node is the union of neighbors of node.lch and
// node.rch. We don't store node.lcu/node.rch, but instead we store
Expand Down Expand Up @@ -500,13 +500,13 @@ __global__ static void extract_separators(const Context context,
const int* d_patch_proj_l,
const int* d_patch_proj_l1,
VertexAttribute<int> v_index,
//VertexAttribute<int> v_render,
int* d_permute,
int current_level,
int depth,
const int* d_dfs_index,
int* d_count,
int* d_cut_size)
// VertexAttribute<int> v_render,
int* d_permute,
int current_level,
int depth,
const int* d_dfs_index,
int* d_count,
int* d_cut_size)
{
// d_patch_proj_l is the patch projection on this level
// d_patch_proj_l1 is the patch projection on the next level (i.e.,
Expand Down
4 changes: 2 additions & 2 deletions include/rxmesh/matrix/permute_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -33,10 +33,10 @@ bool is_unique_permutation(uint32_t size, T* h_permute)
template <typename T>
void inverse_permutation(uint32_t size, T* perm, T* helper)
{
for (int i = 0; i < size; ++i) {
for (uint32_t i = 0; i < size; ++i) {
helper[perm[i]] = i;
}
for (int i = 0; i < size; ++i) {
for (uint32_t i = 0; i < size; ++i) {
perm[i] = helper[i];
}
}
Expand Down
9 changes: 5 additions & 4 deletions include/rxmesh/matrix/sparse_matrix.cuh
Original file line number Diff line number Diff line change
Expand Up @@ -468,6 +468,7 @@ struct SparseMatrix
}
}
assert(1 != 1);
return get_val_at(0);
}

/**
Expand All @@ -485,6 +486,7 @@ struct SparseMatrix
}
}
assert(1 != 1);
return T(0);
}

/**
Expand Down Expand Up @@ -529,8 +531,7 @@ struct SparseMatrix
/**
* @brief return the row index corresponding to specific vertex handle
*/
__device__ __host__ const uint32_t
get_row_id(const VertexHandle& handle) const
__device__ __host__ uint32_t get_row_id(const VertexHandle& handle) const
{
auto id = handle.unpack();
return m_context.vertex_prefix()[id.first] + id.second;
Expand Down Expand Up @@ -1789,9 +1790,9 @@ struct SparseMatrix
if constexpr (std::is_same_v<T, double>) {
CUSOLVER_ERROR(cusolverSpDcsrlsvqr(handle,
rows(),
non_zeros(),
non_zeros(),
m_descr,
m_d_val,
m_d_val,
m_d_row_ptr,
m_d_col_idx,
d_b,
Expand Down
Loading

0 comments on commit 5ae223f

Please sign in to comment.