#pragma once
#include "gte_types.hpp"

namespace JabyEngine {
    namespace GTE {
        // Load vertex or normal to vertex register 0
        static __always_inline void ldv0(const SVECTOR& vector) {
            __asm__ volatile("lwc2 $0, 0(%0)" :: "r"(&vector));
            __asm__ volatile("lwc2 $1, 4(%0)" :: "r"(&vector));
        }

        // Load vertex or normal to vertex register 1
        static __always_inline void ldv1(const SVECTOR& vector) {
            __asm__ volatile("lwc2 $2, 0(%0)" :: "r"(&vector));
            __asm__ volatile("lwc2 $3, 4(%0)" :: "r"(&vector));
        }

        // Load vertex or normal to vertex register 2
        static __always_inline void ldv2(const SVECTOR& vector) {
            __asm__ volatile("lwc2 $4, 0(%0)" :: "r"(&vector));
            __asm__ volatile("lwc2 $5, 4(%0)" :: "r"(&vector));
        }

        // Load column vector of MATRIX to universal register
        static __always_inline void ldclmv(const MATRIX& matrix, size_t col) {
            __asm__ volatile("lhu $12, 0(%0)"  :: "r"(reinterpret_cast<const char*>(&matrix) + (col << 1)) : "$12", "$13", "$14");
            __asm__ volatile("lhu $13, 6(%0)"  :: "r"(reinterpret_cast<const char*>(&matrix) + (col << 1)) : "$12", "$13", "$14");
            __asm__ volatile("lhu $14, 12(%0)" :: "r"(reinterpret_cast<const char*>(&matrix) + (col << 1)) : "$12", "$13", "$14");
            __asm__ volatile("mtc2 $12, $9"    :: "r"(reinterpret_cast<const char*>(&matrix) + (col << 1)) : "$12", "$13", "$14");
            __asm__ volatile("mtc2 $13, $10"   :: "r"(reinterpret_cast<const char*>(&matrix) + (col << 1)) : "$12", "$13", "$14");
            __asm__ volatile("mtc2 $14, $11"   :: "r"(reinterpret_cast<const char*>(&matrix) + (col << 1)) : "$12", "$13", "$14");
        }

        // Store flag
        static __always_inline void stflg(int32_t& flag) {
            __asm__ volatile("cfc2 $12, $31" :: "r"(&flag) : "$12", "memory");
            __asm__ volatile("nop"           :: "r"(&flag) : "$12", "memory");
            __asm__ volatile("sw $12, 0(%0)" :: "r"(&flag) : "$12", "memory");
        }

        // Store MATRIX column from 16 bit universal register
        static __always_inline void stclmv(MATRIX& matrix, size_t col) {
            __asm__ volatile("mfc2 $12, $9"   :: "r"(reinterpret_cast<char*>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
            __asm__ volatile("mfc2 $13, $10"  :: "r"(reinterpret_cast<char*>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
            __asm__ volatile("mfc2 $14, $11"  :: "r"(reinterpret_cast<char*>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
            __asm__ volatile("sh $12, 0(%0)"  :: "r"(reinterpret_cast<char*>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
            __asm__ volatile("sh $13, 6(%0)"  :: "r"(reinterpret_cast<char*>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
            __asm__ volatile("sh $14, 12(%0)" :: "r"(reinterpret_cast<char*>(&matrix) + (col << 1)) : "$12", "$13", "$14", "memory");
        }

        // Store VECTOR from 32 bit universal register
        static __always_inline void stlvnl(VECTOR& out_vector) {
            __asm__ volatile("swc2 $25, 0(%0)" :: "r"(&out_vector) : "memory");
            __asm__ volatile("swc2 $26, 4(%0)" :: "r"(&out_vector) : "memory");
            __asm__ volatile("swc2 $27, 8(%0)" :: "r"(&out_vector) : "memory");
        }

        /*
            Kernel of RotTrans
            (Transfer vector) + (Rotation Matrix)*(vertex register 0)
        */
        static __always_inline void rt() {
            __asm__ volatile("nop");
            __asm__ volatile("nop");
            __asm__ volatile("cop2 0x0480012");
        }

        /*
            Variation of gte_rt
            (Rotation Matrix)*(16 bit universal vector)
        */
        static __always_inline void rtir() {
            __asm__ volatile("nop");
            __asm__ volatile("nop");
            __asm__ volatile("cop2 0x049E012");
        }
    }
}
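
// Usage sketch (illustrative addition, not part of the original header):
// a typical RotTrans round trip built from the helpers above. The names
// `input`, `output` and `flags` are hypothetical caller-side variables, the
// SVECTOR field layout is assumed to be {vx, vy, vz, pad}, and the rotation
// matrix / transfer vector are assumed to already be loaded into the GTE
// control registers by the engine's own setup code.
//
//     using namespace JabyEngine::GTE;
//
//     SVECTOR input  = {4096, 0, 0, 0}; // 16 bit input vertex
//     VECTOR  output = {};              // 32 bit result vector
//     int32_t flags  = 0;               // copy of the GTE FLAG register
//
//     ldv0(input);    // load vertex into GTE vertex register 0
//     rt();           // (Transfer vector) + (Rotation Matrix) * V0
//     stlvnl(output); // store the 32 bit result into `output`
//     stflg(flags);   // read back FLAG to check for overflows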