// jabyengine/include/PSX/GTE/gte_instruction.hpp
#pragma once
#include "gte_types.hpp"

namespace JabyEngine {
    namespace GTE {
        // Load vertex or normal to vertex register 0 (VXY0/VZ0)
        static __always_inline void ldv0(const SVECTOR& vector) {
            __asm__ volatile("lwc2 $0, 0(%0)" :: "r"(&vector)); // VXY0 = (y << 16) | x
            __asm__ volatile("lwc2 $1, 4(%0)" :: "r"(&vector)); // VZ0  = z
        }

        // Load vertex or normal to vertex register 1 (VXY1/VZ1)
        static __always_inline void ldv1(const SVECTOR& vector) {
            __asm__ volatile("lwc2 $2, 0(%0)" :: "r"(&vector)); // VXY1 = (y << 16) | x
            __asm__ volatile("lwc2 $3, 4(%0)" :: "r"(&vector)); // VZ1  = z
        }

        // Load vertex or normal to vertex register 2 (VXY2/VZ2)
        static __always_inline void ldv2(const SVECTOR& vector) {
            __asm__ volatile("lwc2 $4, 0(%0)" :: "r"(&vector)); // VXY2 = (y << 16) | x
            __asm__ volatile("lwc2 $5, 4(%0)" :: "r"(&vector)); // VZ2  = z
        }
        // Load the least significant 16 bits of each 32 bit VECTOR component to vertex register 0
        static __always_inline void ldlv0(const VECTOR& vector) {
            __asm__ volatile("lhu $13, 4(%0)"   :: "r"(&vector) : "$12", "$13"); // $13 = vector.y (low 16 bits)
            __asm__ volatile("lhu $12, 0(%0)"   :: "r"(&vector) : "$12", "$13"); // $12 = vector.x (low 16 bits)
            __asm__ volatile("sll $13, $13, 16" :: "r"(&vector) : "$12", "$13"); // move y into the upper halfword
            __asm__ volatile("or $12, $12, $13" :: "r"(&vector) : "$12", "$13"); // pack (y << 16) | x
            __asm__ volatile("mtc2 $12, $0"     :: "r"(&vector) : "$12", "$13"); // VXY0 = packed x/y
            __asm__ volatile("lwc2 $1, 8(%0)"   :: "r"(&vector) : "$12", "$13"); // VZ0  = vector.z (low 16 bits)
        }
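
        /*
            The packing above is equivalent to this C sketch (VXY0/VZ0 name the
            GTE data registers here, not variables in this header):

                VXY0 = (uint32_t(vector.y & 0xFFFF) << 16) | uint32_t(vector.x & 0xFFFF);
                VZ0  = vector.z & 0xFFFF;
        */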

        // Loads a GPU VERTEX type into vertex register 0
        static __always_inline void ldgv0(const GPU::Vertex& vertex) {
            __asm__ volatile("lwc2 $0, 0(%0)" :: "r"(&vertex)); // VXY0 = packed x/y
            __asm__ volatile("mtc2 $0, $1");                    // VZ0  = 0 (reconstructed; assumes a 2D vertex carries no z)
        }

        // Load column vector of MATRIX to the 16 bit universal register (IR1-IR3)
        static __always_inline void ldclmv(const ROTMATRIX& matrix, size_t col) {
            const auto column_adr = reinterpret_cast<uintptr_t>(&matrix) + (col << 1);

            __asm__ volatile("lhu $12, 0(%0)"  :: "r"(column_adr) : "$12", "$13", "$14"); // $12 = matrix[0][col]
            __asm__ volatile("lhu $13, 6(%0)"  :: "r"(column_adr) : "$12", "$13", "$14"); // $13 = matrix[1][col] (row stride: 6 bytes)
            __asm__ volatile("lhu $14, 12(%0)" :: "r"(column_adr) : "$12", "$13", "$14"); // $14 = matrix[2][col]
            __asm__ volatile("mtc2 $12, $9"    :: "r"(column_adr) : "$12", "$13", "$14"); // IR1 = matrix[0][col]
            __asm__ volatile("mtc2 $13, $10"   :: "r"(column_adr) : "$12", "$13", "$14"); // IR2 = matrix[1][col]
            __asm__ volatile("mtc2 $14, $11"   :: "r"(column_adr) : "$12", "$13", "$14"); // IR3 = matrix[2][col]
        }

        // Store FLAG (GTE control register 31)
        static __always_inline void stflg(int32_t& flag) {
            __asm__ volatile("cfc2 $12, $31" :: "r"(&flag) : "$12", "memory"); // $12 = FLAG
            __asm__ volatile("nop"           :: "r"(&flag) : "$12", "memory"); // cfc2 load delay slot
            __asm__ volatile("sw $12, 0(%0)" :: "r"(&flag) : "$12", "memory");
        }
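
        /*
            Typical use, as a sketch: after a GTE command, FLAG reports saturations
            and overflows. Bit 31 is the logical sum of the error bits (bits 30-23
            and 18-13), so a quick validity check can look like this
            (check_gte_error is a hypothetical helper, not part of this header):

            static bool check_gte_error() {
                int32_t flag;

                stflg(flag);
                return flag < 0; // bit 31 set -> at least one error bit is set
            }
        */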

        // Store MATRIX column from the 16 bit universal register (IR1-IR3)
        static __always_inline void stclmv(ROTMATRIX& matrix, size_t col) {
            const auto column_adr = reinterpret_cast<uintptr_t>(&matrix) + (col << 1);

            __asm__ volatile("mfc2 $12, $9"   :: "r"(column_adr) : "$12", "$13", "$14", "memory"); // $12 = IR1
            __asm__ volatile("mfc2 $13, $10"  :: "r"(column_adr) : "$12", "$13", "$14", "memory"); // $13 = IR2
            __asm__ volatile("mfc2 $14, $11"  :: "r"(column_adr) : "$12", "$13", "$14", "memory"); // $14 = IR3
            __asm__ volatile("sh $12, 0(%0)"  :: "r"(column_adr) : "$12", "$13", "$14", "memory"); // matrix[0][col] = IR1
            __asm__ volatile("sh $13, 6(%0)"  :: "r"(column_adr) : "$12", "$13", "$14", "memory"); // matrix[1][col] = IR2
            __asm__ volatile("sh $14, 12(%0)" :: "r"(column_adr) : "$12", "$13", "$14", "memory"); // matrix[2][col] = IR3
        }

        // Store VECTOR from the 32 bit universal register (MAC1-MAC3); no limiter applied
        static __always_inline void stlvnl(VECTOR& out_vector) {
            __asm__ volatile("swc2 $25, 0(%0)" :: "r"(&out_vector) : "memory"); // out_vector.x = MAC1
            __asm__ volatile("swc2 $26, 4(%0)" :: "r"(&out_vector) : "memory"); // out_vector.y = MAC2
            __asm__ volatile("swc2 $27, 8(%0)" :: "r"(&out_vector) : "memory"); // out_vector.z = MAC3
        }

        // Modify to store in VERTEX?
        // Store SVECTOR from the 16 bit universal register (IR1-IR3)
        static __always_inline void stsv(SVECTOR& out_vector) {
            __asm__ volatile("mfc2 $12, $9"  :: "r"(&out_vector) : "$12", "$13", "$14", "memory"); // $12 = IR1
            __asm__ volatile("mfc2 $13, $10" :: "r"(&out_vector) : "$12", "$13", "$14", "memory"); // $13 = IR2
            __asm__ volatile("mfc2 $14, $11" :: "r"(&out_vector) : "$12", "$13", "$14", "memory"); // $14 = IR3
            __asm__ volatile("sh $12, 0(%0)" :: "r"(&out_vector) : "$12", "$13", "$14", "memory"); // out_vector.x = IR1
            __asm__ volatile("sh $13, 2(%0)" :: "r"(&out_vector) : "$12", "$13", "$14", "memory"); // out_vector.y = IR2
            __asm__ volatile("sh $14, 4(%0)" :: "r"(&out_vector) : "$12", "$13", "$14", "memory"); // out_vector.z = IR3
        }

        // Stores the result into a GPU Vertex type
        static __always_inline void stgv(GPU::Vertex& out_vertex) {
            __asm__ volatile("mfc2 $12, $9"  :: "r"(&out_vertex) : "$12", "$13", "$14", "memory"); // $12 = IR1
            __asm__ volatile("mfc2 $13, $10" :: "r"(&out_vertex) : "$12", "$13", "$14", "memory"); // $13 = IR2
            __asm__ volatile("sh $12, 0(%0)" :: "r"(&out_vertex) : "$12", "$13", "$14", "memory"); // out_vertex.x = IR1
            __asm__ volatile("sh $13, 2(%0)" :: "r"(&out_vertex) : "$12", "$13", "$14", "memory"); // out_vertex.y = IR2
        }

        /*
            Kernel of RotTrans:
            (Transfer vector) + (Rotation matrix) * (vertex register 0)
        */
        static __always_inline void rt() {
            __asm__ volatile("nop"); // give preceding GTE register loads time to complete
            __asm__ volatile("nop");
            __asm__ volatile("cop2 0x0480012"); // MVMVA: sf=1, mx=rotation, v=V0, cv=TR
        }
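
        /*
            The command word above and the two below are MVMVA (multiply vector by
            matrix and add vector) encodings. The relevant bit fields of a GTE
            command word are:

                bits 24-20  fake command number (ignored by the hardware)
                bit  19     sf - shift result right by 12 (fixed point correction)
                bits 18-17  mx - matrix      (0 = rotation, 1 = light, 2 = color)
                bits 16-15  v  - vector      (0 = V0, 1 = V1, 2 = V2, 3 = IR)
                bits 14-13  cv - translation (0 = TR, 1 = BK, 2 = FC, 3 = none)
                bits  5-0   real command number (MVMVA = 0x12)

            0x0480012 therefore decodes to: sf=1, rotation matrix, V0, TR.
        */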

        /*
            Variation of rt:
            (Rotation matrix) * (vertex register 0)
        */
        static __always_inline void rtv0() {
            __asm__ volatile("nop");
            __asm__ volatile("nop");
            __asm__ volatile("cop2 0x0486012"); // MVMVA: sf=1, mx=rotation, v=V0, cv=none
        }

        /*
            Variation of rt:
            (Rotation matrix) * (16 bit universal vector)
        */
        static __always_inline void rtir() {
            __asm__ volatile("nop");
            __asm__ volatile("nop");
            __asm__ volatile("cop2 0x049E012"); // MVMVA: sf=1, mx=rotation, v=IR, cv=none
        }
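
        /*
            Putting the pieces together, a minimal sketch (transform_vertex is a
            hypothetical helper, not part of this header): transform an SVECTOR by
            the rotation matrix and translation vector currently loaded into the
            GTE control registers.

            static void transform_vertex(const SVECTOR& input, VECTOR& output) {
                ldv0(input);    // V0 <- input vertex
                rt();           // MAC1-MAC3 <- TR + R * V0
                stlvnl(output); // store the raw 32 bit MACs (stsv would store the
                                // saturated 16 bit IRs instead)
            }

            The same scheme multiplies two matrices column by column, as the PSYQ
            MulMatrix routines do: ldclmv() a column into IR1-IR3, rtir() to
            multiply it by the rotation matrix, stclmv() to store the result column.
        */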
    }
}