Remove deprecated use of register keyword #9

Open · wants to merge 1 commit into base: master
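Background on the change: the register storage-class specifier is only an optimization hint that modern compilers ignore; it was deprecated in C++11 and removed in C++17, so these headers can produce warnings or hard errors when included from C++ code built against newer standards. A minimal before/after sketch of the pattern this commit applies everywhere (illustrative names, not taken from the patch):

/* before: 'register' adds nothing and trips up -std=c++17 builds */
static inline unsigned int twice_old(register unsigned int x) { register unsigned int y = x; return y + y; }

/* after: identical semantics and, in practice, identical generated code */
static inline unsigned int twice_new(unsigned int x) { unsigned int y = x; return y + y; }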
gc/mad.h: 6 changes (3 additions, 3 deletions)
@@ -267,7 +267,7 @@ mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)

# elif defined(FPM_ARM)

-/*
+/*
* This ARM V4 version is as accurate as FPM_64BIT but much faster. The
* least significant bit is properly rounded at no CPU cycle cost!
*/
@@ -476,8 +476,8 @@ mad_fixed_t mad_f_mul_inline(mad_fixed_t x, mad_fixed_t y)

# if !defined(mad_f_mul)
# define mad_f_mul(x, y) \
-({ register mad_fixed64hi_t __hi; \
-register mad_fixed64lo_t __lo; \
+({ mad_fixed64hi_t __hi; \
+mad_fixed64lo_t __lo; \
MAD_F_MLX(__hi, __lo, (x), (y)); \
mad_f_scale64(__hi, __lo); \
})
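For readers unfamiliar with libmad's fixed-point layer: __hi and __lo receive the high and low 32-bit halves of the 64-bit product computed by MAD_F_MLX, and mad_f_scale64 shifts that product back down into mad_fixed_t range. A rough portable equivalent, assuming libmad's usual 28 fractional bits and ignoring the rounding the ARM path performs (a sketch, not the library's tuned code):

/* Sketch only: what the assembly-backed mad_f_mul macro computes. */
static inline mad_fixed_t mad_f_mul_sketch(mad_fixed_t x, mad_fixed_t y)
{
    return (mad_fixed_t)(((long long)x * (long long)y) >> 28); /* 28 = assumed MAD_F_FRACBITS */
}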
gc/ogc/cast.h: 68 changes (34 additions, 34 deletions)
@@ -53,37 +53,37 @@ static inline void CAST_Init(void)

static inline void CAST_SetGQR2(u32 type,s32 scale)
{
-register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
__set_gqr(GQR2,val);
}

static inline void CAST_SetGQR3(u32 type,s32 scale)
{
-register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
__set_gqr(GQR3,val);
}

static inline void CAST_SetGQR4(u32 type,s32 scale)
{
-register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
__set_gqr(GQR4,val);
}

static inline void CAST_SetGQR5(u32 type,s32 scale)
{
-register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
__set_gqr(GQR5,val);
}

static inline void CAST_SetGQR6(u32 type,s32 scale)
{
-register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
__set_gqr(GQR6,val);
}

static inline void CAST_SetGQR7(u32 type,s32 scale)
{
-register u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
+u32 val = ((((((scale)<<8)&GQR_SCALE_MASK)|(type))<<16)|((((scale)<<8)&GQR_SCALE_MASK)|(type)));
__set_gqr(GQR7,val);
}
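All six setters above build the same 16-bit pattern twice and mirror it into the upper and lower halves of the GQR, so loads and stores use identical quantization. A small helper makes the packing easier to read (a sketch under the assumption that GQR_SCALE_MASK selects the scale field starting at bit 8, as the shifts imply; it is not part of the patch):

/* Sketch: pack one (type, scale) pair and mirror it into both GQR halves. */
static inline u32 gqr_pack_sketch(u32 type, s32 scale)
{
    u32 half = (((u32)scale << 8) & GQR_SCALE_MASK) | type; /* scale in bits 8..13, type in the low bits (assumed) */
    return (half << 16) | half;                             /* same settings for the load and store halves */
}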

@@ -94,58 +94,58 @@ static inline void CAST_SetGQR7(u32 type,s32 scale)
/* */
/******************************************************************/

-static inline f32 __castu8f32(register u8 *in)
+static inline f32 __castu8f32( u8 *in)
{
-register f32 rval;
+f32 rval;
__asm__ __volatile__ (
"psq_l %[rval],0(%[in]),1,2" : [rval]"=f"(rval) : [in]"r"(in)
);
return rval;
}

-static inline f32 __castu16f32(register u16 *in)
+static inline f32 __castu16f32( u16 *in)
{
-register f32 rval;
+f32 rval;
__asm__ __volatile__ (
"psq_l %[rval],0(%[in]),1,3" : [rval]"=f"(rval) : [in]"r"(in)
);
return rval;
}

-static inline f32 __casts8f32(register s8 *in)
+static inline f32 __casts8f32( s8 *in)
{
-register f32 rval;
+f32 rval;
__asm__ __volatile__ (
"psq_l %[rval],0(%[in]),1,4" : [rval]"=f"(rval) : [in]"r"(in)
);
return rval;
}

-static inline f32 __casts16f32(register s16 *in)
+static inline f32 __casts16f32( s16 *in)
{
-register f32 rval;
+f32 rval;
__asm__ __volatile__ (
"psq_l %[rval],0(%[in]),1,5" : [rval]"=f"(rval) : [in]"r"(in)
);
return rval;
}

-static inline void castu8f32(register u8 *in,register volatile f32 *out)
+static inline void castu8f32( u8 *in, volatile f32 *out)
{
*out = __castu8f32(in);
}

-static inline void castu16f32(register u16 *in,register volatile f32 *out)
+static inline void castu16f32( u16 *in, volatile f32 *out)
{
*out = __castu16f32(in);
}

-static inline void casts8f32(register s8 *in,register volatile f32 *out)
+static inline void casts8f32( s8 *in, volatile f32 *out)
{
*out = __casts8f32(in);
}

-static inline void casts16f32(register s16 *in,register volatile f32 *out)
+static inline void casts16f32( s16 *in, volatile f32 *out)
{
*out = __casts16f32(in);
}
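These wrappers depend on the quantization registers set up earlier in this header (CAST_Init/CAST_SetGQRn): the trailing ,1,2 through ,1,5 operands of psq_l select GQR2 through GQR5 for u8, u16, s8 and s16 sources respectively. A hypothetical caller, assuming CAST_Init() has already programmed those GQRs as this header intends:

/* Usage sketch: convert a u8 sample to f32 with a single quantized load. */
static inline f32 u8_to_f32_example(u8 *sample)
{
    f32 out;
    castu8f32(sample, &out); /* psq_l through GQR2; with a zero scale, 200 loads as 200.0f */
    return out;
}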
@@ -156,11 +156,11 @@ static inline void casts16f32(register s16 *in,register volatile f32 *out)
/* */
/******************************************************************/

-static inline u8 __castf32u8(register f32 in)
+static inline u8 __castf32u8( f32 in)
{
f32 a;
-register u8 rval;
-register f32 *ptr = &a;
+u8 rval;
+f32 *ptr = &a;

__asm__ __volatile__ (
"psq_st %[in],0(%[ptr]),1,2\n"
@@ -170,11 +170,11 @@ static inline u8 __castf32u8(register f32 in)
return rval;
}

-static inline u16 __castf32u16(register f32 in)
+static inline u16 __castf32u16( f32 in)
{
f32 a;
-register u16 rval;
-register f32 *ptr = &a;
+u16 rval;
+f32 *ptr = &a;

__asm__ __volatile__ (
"psq_st %[in],0(%[ptr]),1,3\n"
@@ -184,11 +184,11 @@ static inline u16 __castf32u16(register f32 in)
return rval;
}

-static inline s8 __castf32s8(register f32 in)
+static inline s8 __castf32s8( f32 in)
{
f32 a;
-register s8 rval;
-register f32 *ptr = &a;
+s8 rval;
+f32 *ptr = &a;

__asm__ __volatile__ (
"psq_st %[in],0(%[ptr]),1,4\n"
@@ -198,11 +198,11 @@ static inline s8 __castf32s8(register f32 in)
return rval;
}

-static inline s16 __castf32s16(register f32 in)
+static inline s16 __castf32s16( f32 in)
{
f32 a;
-register s16 rval;
-register f32 *ptr = &a;
+s16 rval;
+f32 *ptr = &a;

__asm__ __volatile__ (
"psq_st %[in],0(%[ptr]),1,5\n"
@@ -212,22 +212,22 @@ static inline s16 __castf32s16(register f32 in)
return rval;
}

-static inline void castf32u8(register f32 *in,register vu8 *out)
+static inline void castf32u8( f32 *in, vu8 *out)
{
*out = __castf32u8(*in);
}

-static inline void castf32u16(register f32 *in,register vu16 *out)
+static inline void castf32u16( f32 *in, vu16 *out)
{
*out = __castf32u16(*in);
}

-static inline void castf32s8(register f32 *in,register vs8 *out)
+static inline void castf32s8( f32 *in, vs8 *out)
{
*out = __castf32s8(*in);
}

-static inline void castf32s16(register f32 *in,register vs16 *out)
+static inline void castf32s16( f32 *in, vs16 *out)
{
*out = __castf32s16(*in);
}
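The reverse direction works the same way through psq_st: the float is stored through a quantizing GQR into a one-element stack slot and read back as an integer. Another hypothetical caller, under the same assumption that CAST_Init() has programmed GQR2 for u8 data:

/* Usage sketch: quantize an f32 back to u8 through GQR2. */
static inline u8 f32_to_u8_example(f32 *value)
{
    vu8 out;                /* volatile u8, matching the castf32u8 prototype */
    castf32u8(value, &out); /* psq_st converts per the GQR2 quantization settings */
    return out;
}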
gc/ogc/gu.h: 60 changes (30 additions, 30 deletions)
@@ -333,7 +333,7 @@ void guLookAt(Mtx mt,guVector *camPos,guVector *camUp,guVector *target);
* surface to the general viewing direction.
*
* \a a and \a b do not have to be unit vectors. Both of these vectors are assumed to be pointing towards the surface from the
- * light or viewer, respectively. Local copies of these vectors are negated, normalized and added head to tail.
+ * light or viewer, respectively. Local copies of these vectors are negated, normalized and added head to tail.
*
* \a half is computed as a unit vector that points from the surface to halfway between the light and the viewing direction.
*
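The paragraph above describes the half-angle construction used for specular lighting. A C sketch of that computation, assuming guVector carries x/y/z as f32 and that the c_guVec* helpers match the ps_ prototypes shown below (an illustration, not the library routine):

/* Sketch of the documented half-angle: negate, normalize, add head to tail, renormalize. */
void half_angle_sketch(guVector *a, guVector *b, guVector *half)
{
    guVector na = { -a->x, -a->y, -a->z }; /* a and b point toward the surface, so flip them */
    guVector nb = { -b->x, -b->y, -b->z };
    c_guVecNormalize(&na);
    c_guVecNormalize(&nb);
    c_guVecAdd(&na, &nb, half);
    c_guVecNormalize(half);                /* unit vector halfway between light and viewer */
}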
@@ -355,14 +355,14 @@ void c_guVecMultiplySR(Mtx mt,guVector *src,guVector *dst);
f32 c_guVecDotProduct(guVector *a,guVector *b);

#ifdef GEKKO
-void ps_guVecAdd(register guVector *a,register guVector *b,register guVector *ab);
-void ps_guVecSub(register guVector *a,register guVector *b,register guVector *ab);
-void ps_guVecScale(register guVector *src,register guVector *dst,f32 scale);
-void ps_guVecNormalize(register guVector *v);
-void ps_guVecCross(register guVector *a,register guVector *b,register guVector *axb);
-void ps_guVecMultiply(register Mtx mt,register guVector *src,register guVector *dst);
-void ps_guVecMultiplySR(register Mtx mt,register guVector *src,register guVector *dst);
-f32 ps_guVecDotProduct(register guVector *a,register guVector *b);
+void ps_guVecAdd( guVector *a, guVector *b, guVector *ab);
+void ps_guVecSub( guVector *a, guVector *b, guVector *ab);
+void ps_guVecScale( guVector *src, guVector *dst,f32 scale);
+void ps_guVecNormalize( guVector *v);
+void ps_guVecCross( guVector *a, guVector *b, guVector *axb);
+void ps_guVecMultiply( Mtx mt, guVector *src, guVector *dst);
+void ps_guVecMultiplySR( Mtx mt, guVector *src, guVector *dst);
+f32 ps_guVecDotProduct( guVector *a, guVector *b);
#endif //GEKKO

void c_guQuatAdd(guQuaternion *a,guQuaternion *b,guQuaternion *ab);
@@ -373,11 +373,11 @@ void c_guQuatInverse(guQuaternion *a,guQuaternion *d);
void c_guQuatMtx(guQuaternion *a,Mtx m);

#ifdef GEKKO
-void ps_guQuatAdd(register guQuaternion *a,register guQuaternion *b,register guQuaternion *ab);
-void ps_guQuatSub(register guQuaternion *a,register guQuaternion *b,register guQuaternion *ab);
-void ps_guQuatMultiply(register guQuaternion *a,register guQuaternion *b,register guQuaternion *ab);
-void ps_guQuatNormalize(register guQuaternion *a,register guQuaternion *d);
-void ps_guQuatInverse(register guQuaternion *a,register guQuaternion *d);
+void ps_guQuatAdd( guQuaternion *a, guQuaternion *b, guQuaternion *ab);
+void ps_guQuatSub( guQuaternion *a, guQuaternion *b, guQuaternion *ab);
+void ps_guQuatMultiply( guQuaternion *a, guQuaternion *b, guQuaternion *ab);
+void ps_guQuatNormalize( guQuaternion *a, guQuaternion *d);
+void ps_guQuatInverse( guQuaternion *a, guQuaternion *d);
#endif

void c_guMtxIdentity(Mtx mt);
@@ -399,22 +399,22 @@ void c_guMtxReflect(Mtx m,guVector *p,guVector *n);
void c_guMtxQuat(Mtx m,guQuaternion *a);

#ifdef GEKKO
-void ps_guMtxIdentity(register Mtx mt);
-void ps_guMtxCopy(register Mtx src,register Mtx dst);
-void ps_guMtxConcat(register Mtx a,register Mtx b,register Mtx ab);
-void ps_guMtxTranspose(register Mtx src,register Mtx xPose);
-u32 ps_guMtxInverse(register Mtx src,register Mtx inv);
-u32 ps_guMtxInvXpose(register Mtx src,register Mtx xPose);
-void ps_guMtxScale(register Mtx mt,register f32 xS,register f32 yS,register f32 zS);
-void ps_guMtxScaleApply(register Mtx src,register Mtx dst,register f32 xS,register f32 yS,register f32 zS);
-void ps_guMtxApplyScale(register Mtx src,register Mtx dst,register f32 xS,register f32 yS,register f32 zS);
-void ps_guMtxTrans(register Mtx mt,register f32 xT,register f32 yT,register f32 zT);
-void ps_guMtxTransApply(register Mtx src,register Mtx dst,register f32 xT,register f32 yT,register f32 zT);
-void ps_guMtxApplyTrans(register Mtx src,register Mtx dst,register f32 xT,register f32 yT,register f32 zT);
-void ps_guMtxRotRad(register Mtx mt,register const char axis,register f32 rad);
-void ps_guMtxRotTrig(register Mtx mt,register const char axis,register f32 sinA,register f32 cosA);
-void ps_guMtxRotAxisRad(register Mtx mt,register guVector *axis,register f32 tmp0);
-void ps_guMtxReflect(register Mtx m,register guVector *p,register guVector *n);
+void ps_guMtxIdentity( Mtx mt);
+void ps_guMtxCopy( Mtx src, Mtx dst);
+void ps_guMtxConcat( Mtx a, Mtx b, Mtx ab);
+void ps_guMtxTranspose( Mtx src, Mtx xPose);
+u32 ps_guMtxInverse( Mtx src, Mtx inv);
+u32 ps_guMtxInvXpose( Mtx src, Mtx xPose);
+void ps_guMtxScale( Mtx mt, f32 xS, f32 yS, f32 zS);
+void ps_guMtxScaleApply( Mtx src, Mtx dst, f32 xS, f32 yS, f32 zS);
+void ps_guMtxApplyScale( Mtx src, Mtx dst, f32 xS, f32 yS, f32 zS);
+void ps_guMtxTrans( Mtx mt, f32 xT, f32 yT, f32 zT);
+void ps_guMtxTransApply( Mtx src, Mtx dst, f32 xT, f32 yT, f32 zT);
+void ps_guMtxApplyTrans( Mtx src, Mtx dst, f32 xT, f32 yT, f32 zT);
+void ps_guMtxRotRad( Mtx mt, const char axis, f32 rad);
+void ps_guMtxRotTrig( Mtx mt, const char axis, f32 sinA, f32 cosA);
+void ps_guMtxRotAxisRad( Mtx mt, guVector *axis, f32 tmp0);
+void ps_guMtxReflect( Mtx m, guVector *p, guVector *n);
#endif //GEKKO

void guMtx44Identity(Mtx44 mt);
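A closing note on the prototype-only changes in gu.h: register is not part of a function's type, so dropping it from declarations does not change the ABI or affect existing callers; old and new declarations of the same function remain compatible. A tiny illustration with a hypothetical function:

/* 'register' on a parameter is a per-declaration hint; both lines declare the same function. */
void scale_vec3(register float *v, register float s);
void scale_vec3(float *v, float s) { v[0] *= s; v[1] *= s; v[2] *= s; }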