diff --git a/include/PSX/GTE/gte.hpp b/include/PSX/GTE/gte.hpp
index 0c9b3335..979fc6b7 100644
--- a/include/PSX/GTE/gte.hpp
+++ b/include/PSX/GTE/gte.hpp
@@ -55,6 +55,38 @@ namespace JabyEngine {
             __asm__ volatile("ctc2 $14, $7" :: "r"(&matrix) : "$12", "$13", "$14");
         }
 
+        /*
+            MulMatrix0
+
+            m0: first input
+            m1: second input
+            result: result of multiplication
+            returns: result
+
+            Multiplies two matrices m0 and m1.
+            The function destroys the constant rotation matrix
+        */
+        static MATRIX& mult_matrix(const MATRIX& m0, const MATRIX& m1, MATRIX& result) {
+            /*
+                Jaby: Somehow this code creates stack usage.... Investigate!!
+            */
+            set_rot_matrix(m0);
+
+            ldclmv(m1, 0);
+            rtir();
+            stclmv(result, 0);
+
+            ldclmv(m1, 1);
+            rtir();
+            stclmv(result, 1);
+
+            ldclmv(m1, 2);
+            rtir();
+            stclmv(result, 2);
+
+            return result;
+        }
+
         /*
             SetGeomOffset(ofx,ofy)
 
diff --git a/include/PSX/GTE/gte_instruction.hpp b/include/PSX/GTE/gte_instruction.hpp
index 5fd9e707..d21dc3df 100644
--- a/include/PSX/GTE/gte_instruction.hpp
+++ b/include/PSX/GTE/gte_instruction.hpp
@@ -21,13 +21,33 @@ namespace JabyEngine {
             __asm__ volatile("lwc2 $5, 4(%0)" :: "r"(&vector));
         }
 
+        // Load column vector of MATRIX to universal register
+        static __always_inline void ldclmv(const MATRIX& matrix, size_t col) {
+            __asm__ volatile("lhu $12, 0(%0)" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14");
+            __asm__ volatile("lhu $13, 6(%0)" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14");
+            __asm__ volatile("lhu $14, 12(%0)" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14");
+            __asm__ volatile("mtc2 $12, $9" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14");
+            __asm__ volatile("mtc2 $13, $10" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14");
+            __asm__ volatile("mtc2 $14, $11" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14");
+        }
+
         // Store flag
         static __always_inline void stflg(int32_t& flag) {
             __asm__ volatile("cfc2 $12, $31" :: "r"(&flag) : "$12", "memory");
             __asm__ volatile("nop" :: "r"(&flag) : "$12", "memory");
             __asm__ volatile("sw $12, 0(%0)" :: "r"(&flag) : "$12", "memory");
         }
-
+
+        // Store MATRIX column from 16 bit universal register
+        static __always_inline void stclmv(MATRIX& matrix, size_t col) {
+            __asm__ volatile("mfc2 $12, $9" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
+            __asm__ volatile("mfc2 $13, $10" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
+            __asm__ volatile("mfc2 $14, $11" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
+            __asm__ volatile("sh $12, 0(%0)" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
+            __asm__ volatile("sh $13, 6(%0)" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
+            __asm__ volatile("sh $14, 12(%0)" :: "r"(reinterpret_cast<uintptr_t>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
+        }
+
         // Store VECTOR from 32 bit universal register
         static __always_inline void stlvnl(VECTOR& out_vector) {
             __asm__ volatile("swc2 $25, 0(%0)" :: "r"(&out_vector) : "memory");
@@ -44,5 +44,15 @@ namespace JabyEngine {
             __asm__ volatile("nop");
             __asm__ volatile("cop2 0x0480012");
         }
+
+        /*
+            Variation of gte_rt
+            (Rotation Matrix)*(16 bit universal vector)
+        */
+        static __always_inline void rtir() {
+            __asm__ volatile("nop");
+            __asm__ volatile("nop");
+            __asm__ volatile("cop2 0x049E012");
+        }
     }
 }
\ No newline at end of file