From e278022dea09c6a844e4837c841cbadb51d2851b Mon Sep 17 00:00:00 2001
From: Harrison <53527582+HTV04@users.noreply.github.com>
Date: Mon, 30 May 2022 01:50:14 -0400
Subject: [PATCH] Remove deprecated use of register keyword

Based on https://github.com/devkitPro/libogc/commit/ed2f0bb0b488e1a9c7afeee2b822e44aeed3ad0a

Co-Authored-By: Dave Murphy
---
 gc/mad.h                   |  6 ++--
 gc/ogc/cast.h              | 68 +++++++++++++++++++-------------------
 gc/ogc/gu.h                | 60 ++++++++++++++++-----------------
 gc/ogc/machine/processor.h | 28 ++++++++--------
 gc/ogc/machine/spinlock.h  | 26 +++++++--------
 libmad/fixed.h             |  6 ++--
 6 files changed, 97 insertions(+), 97 deletions(-)

diff --git a/gc/mad.h b/gc/mad.h
index 34d966c5..78e5c8ea 100644
--- a/gc/mad.h
+++ b/gc/mad.h
@@ -267,7 +267,7 @@ mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)
 
 # elif defined(FPM_ARM)
 
-/* 
+/*
  * This ARM V4 version is as accurate as FPM_64BIT but much faster. The
  * least significant bit is properly rounded at no CPU cycle cost!
  */
@@ -476,8 +476,8 @@ mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)
 
 # if !defined(mad_f_mul)
 #  define mad_f_mul(x, y) \
-    ({ register mad_fixed64hi_t __hi; \
-       register mad_fixed64lo_t __lo; \
+    ({ mad_fixed64hi_t __hi; \
+       mad_fixed64lo_t __lo; \
        MAD_F_MLX(__hi, __lo, (x), (y)); \
        mad_f_scale64(__hi, __lo); \
     })
diff --git a/gc/ogc/cast.h b/gc/ogc/cast.h
index 1f1d76bb..a422ec60 100644
--- a/gc/ogc/cast.h
+++ b/gc/ogc/cast.h
@@ -53,37 +53,37 @@ static inline void CAST_Init(void)
 
 static inline void CAST_SetGQR2(u32 type,s32 scale)
 {
-	register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+	u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
 	__set_gqr(GQR2,val);
 }
 
 static inline void CAST_SetGQR3(u32 type,s32 scale)
 {
-	register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+	u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
 	__set_gqr(GQR3,val);
 }
 
 static inline void CAST_SetGQR4(u32 type,s32 scale)
 {
-	register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+	u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
 	__set_gqr(GQR4,val);
 }
 
 static inline void CAST_SetGQR5(u32 type,s32 scale)
 {
-	register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+	u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
 	__set_gqr(GQR5,val);
 }
 
 static inline void CAST_SetGQR6(u32 type,s32 scale)
 {
-	register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+	u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
 	__set_gqr(GQR6,val);
 }
 
 static inline void CAST_SetGQR7(u32 type,s32 scale)
 {
-	register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+	u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
 	__set_gqr(GQR7,val);
 }
 
@@ -94,58 +94,58 @@ static inline void CAST_SetGQR7(u32 type,s32 scale)
 /*  */
 /******************************************************************/
 
-static inline f32 __castu8f32(register u8 *in)
+static inline f32 __castu8f32( u8 *in)
 {
-	register f32 rval;
+	f32 rval;
 
 	__asm__ __volatile__ ( "psq_l %[rval],0(%[in]),1,2" : [rval]"=f"(rval) : [in]"r"(in) );
 
 	return rval;
 }
 
-static inline f32 __castu16f32(register u16 *in)
+static inline f32 __castu16f32( u16 *in)
 {
-	register f32 rval;
+	f32 rval;
 
 	__asm__ __volatile__ ( "psq_l %[rval],0(%[in]),1,3" : [rval]"=f"(rval) : [in]"r"(in) );
 
 	return rval;
 }
 
-static inline f32 __casts8f32(register s8 *in)
+static inline f32 __casts8f32( s8 *in)
 {
-	register f32 rval;
+	f32 rval;
 
 	__asm__ __volatile__ ( "psq_l %[rval],0(%[in]),1,4" : [rval]"=f"(rval) : [in]"r"(in) );
 
 	return rval;
 }
 
-static inline f32 __casts16f32(register s16 *in)
+static inline f32 __casts16f32( s16 *in)
 {
-	register f32 rval;
+	f32 rval;
 
 	__asm__ __volatile__ ( "psq_l %[rval],0(%[in]),1,5" : [rval]"=f"(rval) : [in]"r"(in) );
 
 	return rval;
 }
 
-static inline void castu8f32(register u8 *in,register volatile f32 *out)
+static inline void castu8f32( u8 *in, volatile f32 *out)
 {
 	*out = __castu8f32(in);
 }
 
-static inline void castu16f32(register u16 *in,register volatile f32 *out)
+static inline void castu16f32( u16 *in, volatile f32 *out)
 {
 	*out = __castu16f32(in);
 }
 
-static inline void casts8f32(register s8 *in,register volatile f32 *out)
+static inline void casts8f32( s8 *in, volatile f32 *out)
 {
 	*out = __casts8f32(in);
 }
 
-static inline void casts16f32(register s16 *in,register volatile f32 *out)
+static inline void casts16f32( s16 *in, volatile f32 *out)
 {
 	*out = __casts16f32(in);
 }
@@ -156,11 +156,11 @@ static inline void casts16f32(register s16 *in,register volatile f32 *out)
 /*  */
 /******************************************************************/
 
-static inline u8 __castf32u8(register f32 in)
+static inline u8 __castf32u8( f32 in)
 {
 	f32 a;
-	register u8 rval;
-	register f32 *ptr = &a;
+	u8 rval;
+	f32 *ptr = &a;
 
 	__asm__ __volatile__ (
 		"psq_st %[in],0(%[ptr]),1,2\n"
@@ -170,11 +170,11 @@ static inline u8 __castf32u8(register f32 in)
 	return rval;
 }
 
-static inline u16 __castf32u16(register f32 in)
+static inline u16 __castf32u16( f32 in)
 {
 	f32 a;
-	register u16 rval;
-	register f32 *ptr = &a;
+	u16 rval;
+	f32 *ptr = &a;
 
 	__asm__ __volatile__ (
 		"psq_st %[in],0(%[ptr]),1,3\n"
@@ -184,11 +184,11 @@ static inline u16 __castf32u16(register f32 in)
 	return rval;
 }
 
-static inline s8 __castf32s8(register f32 in)
+static inline s8 __castf32s8( f32 in)
 {
 	f32 a;
-	register s8 rval;
-	register f32 *ptr = &a;
+	s8 rval;
+	f32 *ptr = &a;
 
 	__asm__ __volatile__ (
 		"psq_st %[in],0(%[ptr]),1,4\n"
@@ -198,11 +198,11 @@ static inline s8 __castf32s8(register f32 in)
 	return rval;
 }
 
-static inline s16 __castf32s16(register f32 in)
+static inline s16 __castf32s16( f32 in)
 {
 	f32 a;
-	register s16 rval;
-	register f32 *ptr = &a;
+	s16 rval;
+	f32 *ptr = &a;
 
 	__asm__ __volatile__ (
 		"psq_st %[in],0(%[ptr]),1,5\n"
@@ -212,22 +212,22 @@ static inline s16 __castf32s16(register f32 in)
 	return rval;
 }
 
-static inline void castf32u8(register f32 *in,register vu8 *out)
+static inline void castf32u8( f32 *in, vu8 *out)
 {
 	*out = __castf32u8(*in);
 }
 
-static inline void castf32u16(register f32 *in,register vu16 *out)
+static inline void castf32u16( f32 *in, vu16 *out)
 {
 	*out = __castf32u16(*in);
 }
 
-static inline void castf32s8(register f32 *in,register vs8 *out)
+static inline void castf32s8( f32 *in, vs8 *out)
 {
 	*out = __castf32s8(*in);
 }
 
-static inline void castf32s16(register f32 *in,register vs16 *out)
+static inline void castf32s16( f32 *in, vs16 *out)
 {
 	*out = __castf32s16(*in);
 }
diff --git a/gc/ogc/gu.h b/gc/ogc/gu.h
index 745509d9..f2cd572e 100644
--- a/gc/ogc/gu.h
+++ b/gc/ogc/gu.h
@@ -333,7 +333,7 @@ void guLookAt(Mtx mt,guVector *camPos,guVector *camUp,guVector *target);
  * surface to the general viewing direction.
  *
  * \a a and \a b do not have to be unit vectors. Both of these vectors are assumed to be pointing towards the surface from the
- * light or viewer, respectively. Local copies of these vectors are negated, normalized and added head to tail. 
+ * light or viewer, respectively. Local copies of these vectors are negated, normalized and added head to tail.
  *
  * \a half is computed as a unit vector that points from the surface to halfway between the light and the viewing direction.
  *
@@ -355,14 +355,14 @@ void c_guVecMultiplySR(Mtx mt,guVector *src,guVector *dst);
 f32 c_guVecDotProduct(guVector *a,guVector *b);
 
 #ifdef GEKKO
-void ps_guVecAdd(register guVector *a,register guVector *b,register guVector *ab);
-void ps_guVecSub(register guVector *a,register guVector *b,register guVector *ab);
-void ps_guVecScale(register guVector *src,register guVector *dst,f32 scale);
-void ps_guVecNormalize(register guVector *v);
-void ps_guVecCross(register guVector *a,register guVector *b,register guVector *axb);
-void ps_guVecMultiply(register Mtx mt,register guVector *src,register guVector *dst);
-void ps_guVecMultiplySR(register Mtx mt,register guVector *src,register guVector *dst);
-f32 ps_guVecDotProduct(register guVector *a,register guVector *b);
+void ps_guVecAdd( guVector *a, guVector *b, guVector *ab);
+void ps_guVecSub( guVector *a, guVector *b, guVector *ab);
+void ps_guVecScale( guVector *src, guVector *dst,f32 scale);
+void ps_guVecNormalize( guVector *v);
+void ps_guVecCross( guVector *a, guVector *b, guVector *axb);
+void ps_guVecMultiply( Mtx mt, guVector *src, guVector *dst);
+void ps_guVecMultiplySR( Mtx mt, guVector *src, guVector *dst);
+f32 ps_guVecDotProduct( guVector *a, guVector *b);
 #endif //GEKKO
 
 void c_guQuatAdd(guQuaternion *a,guQuaternion *b,guQuaternion *ab);
@@ -373,11 +373,11 @@ void c_guQuatInverse(guQuaternion *a,guQuaternion *d);
 void c_guQuatMtx(guQuaternion *a,Mtx m);
 
 #ifdef GEKKO
-void ps_guQuatAdd(register guQuaternion *a,register guQuaternion *b,register guQuaternion *ab);
-void ps_guQuatSub(register guQuaternion *a,register guQuaternion *b,register guQuaternion *ab);
-void ps_guQuatMultiply(register guQuaternion *a,register guQuaternion *b,register guQuaternion *ab);
-void ps_guQuatNormalize(register guQuaternion *a,register guQuaternion *d);
-void ps_guQuatInverse(register guQuaternion *a,register guQuaternion *d);
+void ps_guQuatAdd( guQuaternion *a, guQuaternion *b, guQuaternion *ab);
+void ps_guQuatSub( guQuaternion *a, guQuaternion *b, guQuaternion *ab);
+void ps_guQuatMultiply( guQuaternion *a, guQuaternion *b, guQuaternion *ab);
+void ps_guQuatNormalize( guQuaternion *a, guQuaternion *d);
+void ps_guQuatInverse( guQuaternion *a, guQuaternion *d);
 #endif
 
 void c_guMtxIdentity(Mtx mt);
@@ -399,22 +399,22 @@ void c_guMtxReflect(Mtx m,guVector *p,guVector *n);
 void c_guMtxQuat(Mtx m,guQuaternion *a);
 
 #ifdef GEKKO
-void ps_guMtxIdentity(register Mtx mt);
-void ps_guMtxCopy(register Mtx src,register Mtx dst);
-void ps_guMtxConcat(register Mtx a,register Mtx b,register Mtx ab);
-void ps_guMtxTranspose(register Mtx src,register Mtx xPose);
-u32 ps_guMtxInverse(register Mtx src,register Mtx inv);
-u32 ps_guMtxInvXpose(register Mtx src,register Mtx xPose);
-void ps_guMtxScale(register Mtx mt,register f32 xS,register f32 yS,register f32 zS);
-void ps_guMtxScaleApply(register Mtx src,register Mtx dst,register f32 xS,register f32 yS,register f32 zS);
-void ps_guMtxApplyScale(register Mtx src,register Mtx dst,register f32 xS,register f32 yS,register f32 zS);
-void ps_guMtxTrans(register Mtx mt,register f32 xT,register f32 yT,register f32 zT);
-void ps_guMtxTransApply(register Mtx src,register Mtx dst,register f32 xT,register f32 yT,register f32 zT);
-void ps_guMtxApplyTrans(register Mtx src,register Mtx dst,register f32 xT,register f32 yT,register f32 zT);
-void ps_guMtxRotRad(register Mtx mt,register const char axis,register f32 rad);
-void ps_guMtxRotTrig(register Mtx mt,register const char axis,register f32 sinA,register f32 cosA);
-void ps_guMtxRotAxisRad(register Mtx mt,register guVector *axis,register f32 tmp0);
-void ps_guMtxReflect(register Mtx m,register guVector *p,register guVector *n);
+void ps_guMtxIdentity( Mtx mt);
+void ps_guMtxCopy( Mtx src, Mtx dst);
+void ps_guMtxConcat( Mtx a, Mtx b, Mtx ab);
+void ps_guMtxTranspose( Mtx src, Mtx xPose);
+u32 ps_guMtxInverse( Mtx src, Mtx inv);
+u32 ps_guMtxInvXpose( Mtx src, Mtx xPose);
+void ps_guMtxScale( Mtx mt, f32 xS, f32 yS, f32 zS);
+void ps_guMtxScaleApply( Mtx src, Mtx dst, f32 xS, f32 yS, f32 zS);
+void ps_guMtxApplyScale( Mtx src, Mtx dst, f32 xS, f32 yS, f32 zS);
+void ps_guMtxTrans( Mtx mt, f32 xT, f32 yT, f32 zT);
+void ps_guMtxTransApply( Mtx src, Mtx dst, f32 xT, f32 yT, f32 zT);
+void ps_guMtxApplyTrans( Mtx src, Mtx dst, f32 xT, f32 yT, f32 zT);
+void ps_guMtxRotRad( Mtx mt, const char axis, f32 rad);
+void ps_guMtxRotTrig( Mtx mt, const char axis, f32 sinA, f32 cosA);
+void ps_guMtxRotAxisRad( Mtx mt, guVector *axis, f32 tmp0);
+void ps_guMtxReflect( Mtx m, guVector *p, guVector *n);
 #endif //GEKKO
 
 void guMtx44Identity(Mtx44 mt);
diff --git a/gc/ogc/machine/processor.h b/gc/ogc/machine/processor.h
index 61d85d5b..ecf96839 100644
--- a/gc/ogc/machine/processor.h
+++ b/gc/ogc/machine/processor.h
@@ -23,24 +23,24 @@
 	} \
 })
 
-#define mfpvr() ({register u32 _rval; \
+#define mfpvr() ({ u32 _rval; \
 		asm volatile("mfpvr %0" : "=r"(_rval)); _rval;})
 
-#define mfdcr(_rn) ({register u32 _rval; \
+#define mfdcr(_rn) ({ u32 _rval; \
 		asm volatile("mfdcr %0," __stringify(_rn) \
 		: "=r" (_rval)); _rval;})
 #define mtdcr(rn, val) asm volatile("mtdcr " __stringify(rn) ",%0" : : "r" (val))
 
-#define mfmsr() ({register u32 _rval; \
+#define mfmsr() ({ u32 _rval; \
 		asm volatile("mfmsr %0" : "=r" (_rval)); _rval;})
 #define mtmsr(val) asm volatile("mtmsr %0" : : "r" (val))
 
-#define mfdec() ({register u32 _rval; \
+#define mfdec() ({ u32 _rval; \
 		asm volatile("mfdec %0" : "=r" (_rval)); _rval;})
 #define mtdec(_val) asm volatile("mtdec %0" : : "r" (_val))
 
 #define mfspr(_rn) \
-({	register u32 _rval = 0; \
+({	u32 _rval = 0; \
 	asm volatile("mfspr %0," __stringify(_rn) \
 	: "=r" (_rval));\
 	_rval; \
@@ -82,12 +82,12 @@
 #define mtthrm3(_val) mtspr(THRM3,_val)
 
 #define __lhbrx(base,index) \
-({	register u16 res; \
+({	u16 res; \
 	__asm__ volatile ("lhbrx %0,%1,%2" : "=r"(res) : "b%"(index), "r"(base) : "memory"); \
 	res; })
 
 #define __lwbrx(base,index) \
-({	register u32 res; \
+({	u32 res; \
 	__asm__ volatile ("lwbrx %0,%1,%2" : "=r"(res) : "b%"(index), "r"(base) : "memory"); \
 	res; })
 
@@ -97,7 +97,7 @@
 #define __stwbrx(base,index,value) \
 	__asm__ volatile ("stwbrx %0,%1,%2" : : "r"(value), "b%"(index), "r"(base) : "memory")
 
-#define cntlzw(_val) ({register u32 _rval; \
+#define cntlzw(_val) ({ u32 _rval; \
 	asm volatile("cntlzw %0, %1" : "=r"((_rval)) : "r"((_val))); _rval;})
 
 #define _CPU_MSR_GET( _msr_value ) \
@@ -111,7 +111,7 @@
 
 #define _CPU_ISR_Enable() \
 	do { \
-		register u32 _val = 0; \
+		u32 _val = 0; \
 		__asm__ __volatile__ ( \
 			"mfmsr %0\n" \
 			"ori %0,%0,0x8000\n" \
@@ -122,7 +122,7 @@
 
 #define _CPU_ISR_Disable( _isr_cookie ) \
 	do { \
-		register u32 _disable_mask = 0; \
+		u32 _disable_mask = 0; \
 		__asm__ __volatile__ ( \
 			"mfmsr %1\n" \
 			"rlwinm %0,%1,0,17,15\n" \
@@ -134,7 +134,7 @@
 
 #define _CPU_ISR_Restore( _isr_cookie ) \
 	do { \
-		register u32 _enable_mask = 0; \
+		u32 _enable_mask = 0; \
 		__asm__ __volatile__ ( \
 			"cmpwi %1,0\n" \
 			"beq 1f\n" \
@@ -148,7 +148,7 @@
 
 #define _CPU_ISR_Flash( _isr_cookie ) \
 	do { \
-		register u32 _flash_mask = 0; \
+		u32 _flash_mask = 0; \
 		__asm__ __volatile__ ( \
 			"cmpwi %1,0\n" \
 			"beq 1f\n" \
@@ -164,13 +164,13 @@
 } while (0)
 
 #define _CPU_FPR_Enable() \
-{ register u32 _val = 0; \
+{ u32 _val = 0; \
 	asm volatile ("mfmsr %0; ori %0,%0,0x2000; mtmsr %0" : \
 	"=&r" (_val) : "0" (_val));\
 }
 
 #define _CPU_FPR_Disable() \
-{ register u32 _val = 0; \
+{ u32 _val = 0; \
 	asm volatile ("mfmsr %0; rlwinm %0,%0,0,19,17; mtmsr %0" : \
 	"=&r" (_val) : "0" (_val));\
 }
diff --git a/gc/ogc/machine/spinlock.h b/gc/ogc/machine/spinlock.h
index 1d6022a2..765c85fa 100644
--- a/gc/ogc/machine/spinlock.h
+++ b/gc/ogc/machine/spinlock.h
@@ -14,7 +14,7 @@ typedef struct {
 
 static __inline__ u32 _test_and_set(u32 *atomic)
 {
-	register u32 ret;
+	u32 ret;
 
 	__asm__ __volatile__ ("1:	lwarx %0,0,%1\n"
 		"	cmpwi 0,%0,0\n"
@@ -31,7 +31,7 @@ static __inline__ u32 _test_and_set(u32 *atomic)
 
 static __inline__ u32 atomic_inc(u32 *pint)
 {
-	register u32 ret;
+	u32 ret;
 
 	__asm__ __volatile__(
 		"1:	lwarx %0,0,%1\n\
 		addi %0,%0,1\n\
@@ -45,7 +45,7 @@ static __inline__ u32 atomic_inc(u32 *pint)
 
 static __inline__ u32 atomic_dec(u32 *pint)
 {
-	register u32 ret;
+	u32 ret;
 
 	__asm__ __volatile__(
 		"1:	lwarx %0,0,%1\n\
 		addi %0,%0,-1\n\
@@ -59,7 +59,7 @@ static __inline__ u32 atomic_dec(u32 *pint)
 
 static __inline__ void spin_lock(spinlock_t *lock)
 {
-	register u32 tmp;
+	u32 tmp;
 
 	__asm__ __volatile__(
 		"b 1f		# spin_lock\n\
@@ -77,13 +77,13 @@ static __inline__ void spin_lock(spinlock_t *lock)
 		: "cr0", "memory");
 }
 
-static __inline__ void spin_lock_irqsave(spinlock_t *lock,register u32 *p_isr_level)
+static __inline__ void spin_lock_irqsave(spinlock_t *lock, u32 *p_isr_level)
 {
-	register u32 level;
-	register u32 tmp;
+	u32 level;
+	u32 tmp;
 
 	_CPU_ISR_Disable(level);
-	
+
 	__asm__ __volatile__(
 		"	b 1f		# spin_lock\n\
 		2:	lwzx %0,0,%1\n\
@@ -98,7 +98,7 @@ static __inline__ void spin_lock_irqsave(spinlock_t *lock,register u32 *p_isr_le
 		: "=&r"(tmp)
 		: "r"(lock), "r"(1)
 		: "cr0", "memory");
-	
+
 	*p_isr_level = level;
 }
 
@@ -108,7 +108,7 @@ static __inline__ void spin_unlock(spinlock_t *lock)
 	lock->lock = 0;
 }
 
-static __inline__ void spin_unlock_irqrestore(spinlock_t *lock,register u32 isr_level)
+static __inline__ void spin_unlock_irqrestore(spinlock_t *lock, u32 isr_level)
 {
 	__asm__ __volatile__(
 		"eieio			# spin_unlock"
@@ -128,7 +128,7 @@ typedef struct {
 
 static __inline__ void read_lock(rwlock_t *rw)
 {
-	register u32 tmp;
+	u32 tmp;
 
 	__asm__ __volatile__(
 		"b 2f		# read_lock\n\
@@ -148,7 +148,7 @@ static __inline__ void read_lock(rwlock_t *rw)
 
 static __inline__ void read_unlock(rwlock_t *rw)
 {
-	register u32 tmp;
+	u32 tmp;
 
 	__asm__ __volatile__(
 		"eieio			# read_unlock\n\
@@ -163,7 +163,7 @@ static __inline__ void read_unlock(rwlock_t *rw)
 
 static __inline__ void write_lock(rwlock_t *rw)
 {
-	register u32 tmp;
+	u32 tmp;
 
 	__asm__ __volatile__(
 		"b 2f		# write_lock\n\
diff --git a/libmad/fixed.h b/libmad/fixed.h
index cd8387d4..6ee51c04 100644
--- a/libmad/fixed.h
+++ b/libmad/fixed.h
@@ -227,7 +227,7 @@ mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)
 
 # elif defined(FPM_ARM)
 
-/* 
+/*
  * This ARM V4 version is as accurate as FPM_64BIT but much faster. The
  * least significant bit is properly rounded at no CPU cycle cost!
  */
@@ -436,8 +436,8 @@ mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)
 
 # if !defined(mad_f_mul)
 #  define mad_f_mul(x, y) \
-    ({ register mad_fixed64hi_t __hi; \
-       register mad_fixed64lo_t __lo; \
+    ({ mad_fixed64hi_t __hi; \
+       mad_fixed64lo_t __lo; \
       MAD_F_MLX(__hi, __lo, (x), (y)); \
      mad_f_scale64(__hi, __lo); \
    })
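
Note on the pattern above: `register` is only an optimization hint that current compilers ignore; it was deprecated in C++11 and is rejected under -std=c++17, which is why headers that are also included from C++ code drop it. A minimal before/after sketch of the same mechanical change, using a hypothetical helper (swap_bytes16) and <stdint.h> types rather than anything from the patched headers:

    #include <stdint.h>

    /* before: valid C, but an error when the header lands in a -std=c++17 build */
    static inline uint16_t swap_bytes16_old(register uint16_t v)
    {
        register uint16_t r = (uint16_t)((v >> 8) | (v << 8));
        return r;
    }

    /* after: identical code generation, valid in both C and modern C++ */
    static inline uint16_t swap_bytes16(uint16_t v)
    {
        uint16_t r = (uint16_t)((v >> 8) | (v << 8));
        return r;
    }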