Skip to content

Commit

Permalink
CUDA_LOGE
Browse files Browse the repository at this point in the history
  • Loading branch information
Fedr committed Jan 2, 2025
1 parent 27b5238 commit 306592b
Show file tree
Hide file tree
Showing 3 changed files with 44 additions and 36 deletions.
34 changes: 19 additions & 15 deletions source/MRCuda/MRCudaBasic.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,21 @@ namespace Cuda
/// logs information about a CUDA error via spdlog::error, including an optional filename and line number
MRCUDA_API cudaError_t logError( cudaError_t code, const char * file = nullptr, int line = 0 );

/// executes given CUDA function and logs the error if any
#define CUDA_EXEC( func ) MR::Cuda::logError( func, __FILE__ , __LINE__ )
/// evaluates given expression and logs the error if any
#define CUDA_LOGE( expr ) MR::Cuda::logError( expr, __FILE__ , __LINE__ )
// deprecated
#define CUDA_EXEC( expr ) CUDA_LOGE( expr )

/// executes given CUDA function, logs if it fails and returns error code
#define CUDA_EXEC_RETURN( func ) if ( auto code = CUDA_EXEC( func ); code != cudaError::cudaSuccess ) return code
/// evaluates given expression, logs if it fails and returns error code
#define CUDA_LOGE_RETURN( expr ) if ( auto code = CUDA_LOGE( expr ); code != cudaError::cudaSuccess ) return code
// deprecated
#define CUDA_EXEC_RETURN( expr ) CUDA_LOGE_RETURN( expr )

/// if func evaluates to anything other than cudaError::cudaSuccess, returns MR::unexpected with the error string, without logging
#define CUDA_RETURN_UNEXPECTED( func ) if ( auto code = ( func ); code != cudaError::cudaSuccess ) return MR::unexpected( MR::Cuda::getError( code ) )
/// if the given expression evaluates to anything other than cudaError::cudaSuccess, returns MR::unexpected with the error string, without logging
#define CUDA_RETURN_UNEXPECTED( expr ) if ( auto code = ( expr ); code != cudaError::cudaSuccess ) return MR::unexpected( MR::Cuda::getError( code ) )

/// executes given CUDA function, logs if it fails and returns MR::unexpected with the error string
#define CUDA_EXEC_RETURN_UNEXPECTED( func ) if ( auto code = CUDA_EXEC( func ); code != cudaError::cudaSuccess ) return MR::unexpected( MR::Cuda::getError( code ) )
/// evaluates given expression, logs if it fails and returns MR::unexpected with the error string
#define CUDA_LOGE_RETURN_UNEXPECTED( expr ) if ( auto code = CUDA_LOGE( expr ); code != cudaError::cudaSuccess ) return MR::unexpected( MR::Cuda::getError( code ) )

template<typename T>
DynamicArray<T>::DynamicArray( size_t size )
Expand Down Expand Up @@ -55,7 +59,7 @@ inline cudaError_t DynamicArray<T>::fromVector( const std::vector<U>& vec )
static_assert ( sizeof( T ) == sizeof( U ) );
if ( auto code = resize( vec.size() ) )
return code;
return CUDA_EXEC( cudaMemcpy( data_, vec.data(), size_ * sizeof( T ), cudaMemcpyHostToDevice ) );
return CUDA_LOGE( cudaMemcpy( data_, vec.data(), size_ * sizeof( T ), cudaMemcpyHostToDevice ) );
}


Expand All @@ -64,13 +68,13 @@ inline cudaError_t DynamicArray<T>::fromBytes( const uint8_t* data, size_t numBy
{
assert( numBytes % sizeof( T ) == 0 );
resize( numBytes / sizeof( T ) );
return CUDA_EXEC( cudaMemcpy( data_, data, numBytes, cudaMemcpyHostToDevice ) );
return CUDA_LOGE( cudaMemcpy( data_, data, numBytes, cudaMemcpyHostToDevice ) );
}

template <typename T>
inline cudaError_t DynamicArray<T>::toBytes( uint8_t* data )
{
return CUDA_EXEC( cudaMemcpy( data, data_, size_ * sizeof( T ), cudaMemcpyDeviceToHost ) );
return CUDA_LOGE( cudaMemcpy( data, data_, size_ * sizeof( T ), cudaMemcpyDeviceToHost ) );
}

template<typename T>
Expand All @@ -80,14 +84,14 @@ cudaError_t DynamicArray<T>::resize( size_t size )
return cudaSuccess;
if ( size_ != 0 )
{
if ( auto code = CUDA_EXEC( cudaFree( data_ ) ) )
if ( auto code = CUDA_LOGE( cudaFree( data_ ) ) )
return code;
}

size_ = size;
if ( size_ != 0 )
{
if ( auto code = CUDA_EXEC( cudaMalloc( ( void** )&data_, size_ * sizeof( T ) ) ) )
if ( auto code = CUDA_LOGE( cudaMalloc( ( void** )&data_, size_ * sizeof( T ) ) ) )
return code;
}
return cudaSuccess;
Expand All @@ -99,14 +103,14 @@ cudaError_t DynamicArray<T>::toVector( std::vector<U>& vec ) const
{
static_assert ( sizeof( T ) == sizeof( U ) );
vec.resize( size_ );
return CUDA_EXEC( cudaMemcpy( vec.data(), data_, size_ * sizeof( T ), cudaMemcpyDeviceToHost ) );
return CUDA_LOGE( cudaMemcpy( vec.data(), data_, size_ * sizeof( T ), cudaMemcpyDeviceToHost ) );
}

inline cudaError_t setToZero( DynamicArrayF& devArray )
{
if ( devArray.size() == 0 )
return cudaSuccess;
return CUDA_EXEC( cudaMemset( devArray.data(), 0, devArray.size() * sizeof( float ) ) );
return CUDA_LOGE( cudaMemset( devArray.data(), 0, devArray.size() * sizeof( float ) ) );
}

} // namespace Cuda
Expand Down
38 changes: 21 additions & 17 deletions source/MRCuda/MRCudaFastWindingNumber.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ FastWindingNumber::FastWindingNumber( const Mesh& mesh ) : mesh_( mesh )

Expected<void> FastWindingNumber::prepareData_( ProgressCallback cb )
{
CUDA_EXEC_RETURN_UNEXPECTED( cudaSetDevice( 0 ) );
CUDA_LOGE_RETURN_UNEXPECTED( cudaSetDevice( 0 ) );
if ( data_ )
{
if ( !reportProgress( cb, 1.0f ) )
Expand All @@ -42,11 +42,11 @@ Expected<void> FastWindingNumber::prepareData_( ProgressCallback cb )
if ( !reportProgress( cb, 0.0f ) )
return unexpectedOperationCanceled();

CUDA_RETURN_UNEXPECTED( data->cudaMeshPoints.fromVector( mesh_.points.vec_ ) );
CUDA_LOGE_RETURN_UNEXPECTED( data->cudaMeshPoints.fromVector( mesh_.points.vec_ ) );
if ( !reportProgress( cb, 0.1f ) )
return unexpectedOperationCanceled();

CUDA_RETURN_UNEXPECTED( data->cudaFaces.fromVector( mesh_.topology.getTriangulation().vec_ ) );
CUDA_LOGE_RETURN_UNEXPECTED( data->cudaFaces.fromVector( mesh_.topology.getTriangulation().vec_ ) );
if ( !reportProgress( cb, 0.3f ) )
return unexpectedOperationCanceled();

Expand All @@ -55,11 +55,11 @@ Expected<void> FastWindingNumber::prepareData_( ProgressCallback cb )
return unexpectedOperationCanceled();

const auto& nodes = tree.nodes();
CUDA_RETURN_UNEXPECTED( data->cudaNodes.fromVector( nodes.vec_ ) );
CUDA_LOGE_RETURN_UNEXPECTED( data->cudaNodes.fromVector( nodes.vec_ ) );
if ( !reportProgress( cb, 0.6f ) )
return unexpectedOperationCanceled();

CUDA_RETURN_UNEXPECTED( data->dipoles.fromVector( mesh_.getDipoles().vec_ ) );
CUDA_LOGE_RETURN_UNEXPECTED( data->dipoles.fromVector( mesh_.getDipoles().vec_ ) );
if ( !reportProgress( cb, 1.0f ) )
return unexpectedOperationCanceled();

Expand All @@ -74,17 +74,18 @@ Expected<void> FastWindingNumber::calcFromVector( std::vector<float>& res, const
{
const size_t size = points.size();
res.resize( size );
CUDA_RETURN_UNEXPECTED( data_->cudaPoints.fromVector( points ) );
CUDA_LOGE_RETURN_UNEXPECTED( data_->cudaPoints.fromVector( points ) );
if ( !reportProgress( cb, 0.6f ) )
return unexpectedOperationCanceled();

DynamicArrayF cudaResult( size );
DynamicArrayF cudaResult;
CUDA_LOGE_RETURN_UNEXPECTED( cudaResult.resize( size ) );
fastWindingNumberFromVector( data_->cudaPoints.data(), data_->dipoles.data(), data_->cudaNodes.data(), data_->cudaMeshPoints.data(), data_->cudaFaces.data(), cudaResult.data(), beta, int( skipFace ), size );
CUDA_EXEC_RETURN_UNEXPECTED( cudaGetLastError() );
CUDA_LOGE_RETURN_UNEXPECTED( cudaGetLastError() );
if ( !reportProgress( cb, 0.7f ) )
return unexpectedOperationCanceled();

CUDA_RETURN_UNEXPECTED( cudaResult.toVector( res ) );
CUDA_LOGE_RETURN_UNEXPECTED( cudaResult.toVector( res ) );
if ( !reportProgress( cb, 1.0f ) )
return unexpectedOperationCanceled();
return {};
Expand All @@ -99,13 +100,14 @@ bool FastWindingNumber::calcSelfIntersections( FaceBitSet& res, float beta, Prog

const size_t size = mesh_.topology.faceSize();
DynamicArrayF cudaResult( size );
//CUDA_LOGE_RETURN_UNEXPECTED( cudaResult.resize( size ) );

fastWindingNumberFromMesh(data_->dipoles.data(), data_->cudaNodes.data(), data_->cudaMeshPoints.data(), data_->cudaFaces.data(), cudaResult.data(), beta, size);
if ( CUDA_EXEC( cudaGetLastError() ) )
if ( CUDA_LOGE( cudaGetLastError() ) )
return false;

std::vector<float> wns;
if ( CUDA_EXEC( cudaResult.toVector( wns ) ) )
if ( CUDA_LOGE( cudaResult.toVector( wns ) ) )
return false;
if ( !reportProgress( cb, 0.9f ) )
return false;
Expand Down Expand Up @@ -137,7 +139,8 @@ Expected<void> FastWindingNumber::calcFromGrid( std::vector<float>& res, const V

const Matrix4 cudaGridToMeshXf = ( gridToMeshXf == AffineXf3f{} ) ? Matrix4{} : getCudaMatrix( gridToMeshXf );
const size_t size = size_t( dims.x ) * dims.y * dims.z;
DynamicArrayF cudaResult( size );
DynamicArrayF cudaResult;
CUDA_LOGE_RETURN_UNEXPECTED( cudaResult.resize( size ) );
if ( !reportProgress( cb, 0.0f ) )
return unexpectedOperationCanceled();

Expand All @@ -147,9 +150,9 @@ Expected<void> FastWindingNumber::calcFromGrid( std::vector<float>& res, const V
data_->dipoles.data(), data_->cudaNodes.data(), data_->cudaMeshPoints.data(), data_->cudaFaces.data(),
cudaResult.data(), beta );

CUDA_EXEC_RETURN_UNEXPECTED( cudaGetLastError() );
CUDA_LOGE_RETURN_UNEXPECTED( cudaGetLastError() );

CUDA_RETURN_UNEXPECTED( cudaResult.toVector( res ) );
CUDA_LOGE_RETURN_UNEXPECTED( cudaResult.toVector( res ) );

if ( !reportProgress( cb, 1.0f ) )
return unexpectedOperationCanceled();
Expand All @@ -175,7 +178,8 @@ Expected<void> FastWindingNumber::calcFromGridWithDistances( std::vector<float>&

const Matrix4 cudaGridToMeshXf = ( gridToMeshXf == AffineXf3f{} ) ? Matrix4{} : getCudaMatrix( gridToMeshXf );
const size_t size = size_t( dims.x ) * dims.y * dims.z;
DynamicArrayF cudaResult( size );
DynamicArrayF cudaResult;
CUDA_LOGE_RETURN_UNEXPECTED( cudaResult.resize( size ) );
if ( !reportProgress( cb, 0.0f ) )
return unexpectedOperationCanceled();

Expand All @@ -185,9 +189,9 @@ Expected<void> FastWindingNumber::calcFromGridWithDistances( std::vector<float>&
data_->dipoles.data(), data_->cudaNodes.data(), data_->cudaMeshPoints.data(), data_->cudaFaces.data(),
cudaResult.data(), options );

CUDA_EXEC_RETURN_UNEXPECTED( cudaGetLastError() );
CUDA_LOGE_RETURN_UNEXPECTED( cudaGetLastError() );

CUDA_RETURN_UNEXPECTED( cudaResult.toVector( res ) );
CUDA_LOGE_RETURN_UNEXPECTED( cudaResult.toVector( res ) );

if ( !reportProgress( cb, 1.0f ) )
return unexpectedOperationCanceled();
Expand Down
8 changes: 4 additions & 4 deletions source/MRCuda/MRCudaTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,15 +11,15 @@ namespace Cuda
{
Expected<void> negatePicture( Image& image )
{
CUDA_EXEC_RETURN_UNEXPECTED( cudaSetDevice( 0 ) );
CUDA_LOGE_RETURN_UNEXPECTED( cudaSetDevice( 0 ) );

DynamicArray<Cuda::Color> cudaArray;
CUDA_RETURN_UNEXPECTED( cudaArray.fromVector( image.pixels ) );
CUDA_LOGE_RETURN_UNEXPECTED( cudaArray.fromVector( image.pixels ) );

negatePictureKernel( cudaArray );
CUDA_EXEC_RETURN_UNEXPECTED( cudaGetLastError() );
CUDA_LOGE_RETURN_UNEXPECTED( cudaGetLastError() );

CUDA_RETURN_UNEXPECTED( cudaArray.toVector( image.pixels ) );
CUDA_LOGE_RETURN_UNEXPECTED( cudaArray.toVector( image.pixels ) );

return {};
}
Expand Down

0 comments on commit 306592b

Please sign in to comment.