obj-y += translate.o cpu.o gdbstub.o helper.o
obj-y += op_helper.o cp0_helper.o fpu_helper.o
-obj-y += dsp_helper.o lmi_helper.o msa_helper.o
+obj-y += dsp_helper.o lmmi_helper.o msa_helper.o
obj-$(CONFIG_SOFTMMU) += mips-semi.o
obj-$(CONFIG_SOFTMMU) += machine.o cp0_timer.o
obj-$(CONFIG_KVM) += kvm.o
* 3 Config3 WatchLo3 WatchHi
* 4 Config4 WatchLo4 WatchHi
* 5 Config5 WatchLo5 WatchHi
- * 6 WatchLo6 WatchHi
- * 7 WatchLo7 WatchHi
+ * 6 Config6 WatchLo6 WatchHi
+ * 7 Config7 WatchLo7 WatchHi
*
*
* Register 20 Register 21 Register 22 Register 23
#define CP0C5_UFR 2
#define CP0C5_NFExists 0
int32_t CP0_Config6;
+ int32_t CP0_Config6_rw_bitmask;
+#define CP0C6_BPPASS 31
+#define CP0C6_KPOS 24
+#define CP0C6_KE 23
+#define CP0C6_VTLBONLY 22
+#define CP0C6_LASX 21
+#define CP0C6_SSEN 20
+#define CP0C6_DISDRTIME 19
+#define CP0C6_PIXNUEN 18
+#define CP0C6_SCRAND 17
+#define CP0C6_LLEXCEN 16
+#define CP0C6_DISVC 15
+#define CP0C6_VCLRU 14
+#define CP0C6_DCLRU 13
+#define CP0C6_PIXUEN 12
+#define CP0C6_DISBLKLYEN 11
+#define CP0C6_UMEMUALEN 10
+#define CP0C6_SFBEN 8
+#define CP0C6_FLTINT 7
+#define CP0C6_VLTINT 6
+#define CP0C6_DISBTB 5
+#define CP0C6_STPREFCTL 2
+#define CP0C6_INSTPREF 1
+#define CP0C6_DATAPREF 0
int32_t CP0_Config7;
+ int64_t CP0_Config7_rw_bitmask;
+#define CP0C7_NAPCGEN 2
+#define CP0C7_UNIMUEN 1
+#define CP0C7_VFPUCGEN 0
uint64_t CP0_LLAddr;
uint64_t CP0_MAAR[MIPS_MAAR_MAX];
int32_t CP0_MAARI;
int32_t CP0_Config5;
int32_t CP0_Config5_rw_bitmask;
int32_t CP0_Config6;
+ int32_t CP0_Config6_rw_bitmask;
int32_t CP0_Config7;
+ int32_t CP0_Config7_rw_bitmask;
target_ulong CP0_LLAddr_rw_bitmask;
int CP0_LLAddr_shift;
int32_t SYNCI_Step;
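
The CP0_Config6_rw_bitmask and CP0_Config7_rw_bitmask fields added above constrain which Config6/Config7 bits a guest MTC0 can actually change. A minimal sketch of that read/write-mask pattern follows; it is illustrative only (the function name is made up and this is not the patch's actual mtc0 helper), but it shows the intended semantics of the *_rw_bitmask fields:

#include <stdint.h>

/*
 * Sketch only: fold a guest write through a register's rw bitmask so
 * that bits outside the mask keep their current (reset) value.
 */
int32_t apply_rw_bitmask(int32_t current, int32_t guest_value, int32_t rw_mask)
{
    return (current & ~rw_mask) | (guest_value & rw_mask);
}

/* e.g. env->CP0_Config6 = apply_rw_bitmask(env->CP0_Config6, arg1,
 *                                          env->CP0_Config6_rw_bitmask); */
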
+++ /dev/null
-/*
- * Loongson Multimedia Instruction emulation helpers for QEMU.
- *
- * Copyright (c) 2011 Richard Henderson <rth@twiddle.net>
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#include "qemu/osdep.h"
-#include "cpu.h"
-#include "exec/helper-proto.h"
-
-/*
- * If the byte ordering doesn't matter, i.e. all columns are treated
- * identically, then this union can be used directly. If byte ordering
- * does matter, we generally ignore dumping to memory.
- */
-typedef union {
- uint8_t ub[8];
- int8_t sb[8];
- uint16_t uh[4];
- int16_t sh[4];
- uint32_t uw[2];
- int32_t sw[2];
- uint64_t d;
-} LMIValue;
-
-/* Some byte ordering issues can be mitigated by XORing in the following. */
-#ifdef HOST_WORDS_BIGENDIAN
-# define BYTE_ORDER_XOR(N) N
-#else
-# define BYTE_ORDER_XOR(N) 0
-#endif
-
-#define SATSB(x) (x < -0x80 ? -0x80 : x > 0x7f ? 0x7f : x)
-#define SATUB(x) (x > 0xff ? 0xff : x)
-
-#define SATSH(x) (x < -0x8000 ? -0x8000 : x > 0x7fff ? 0x7fff : x)
-#define SATUH(x) (x > 0xffff ? 0xffff : x)
-
-#define SATSW(x) \
- (x < -0x80000000ll ? -0x80000000ll : x > 0x7fffffff ? 0x7fffffff : x)
-#define SATUW(x) (x > 0xffffffffull ? 0xffffffffull : x)
-
-uint64_t helper_paddsb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; ++i) {
- int r = vs.sb[i] + vt.sb[i];
- vs.sb[i] = SATSB(r);
- }
- return vs.d;
-}
-
-uint64_t helper_paddusb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; ++i) {
- int r = vs.ub[i] + vt.ub[i];
- vs.ub[i] = SATUB(r);
- }
- return vs.d;
-}
-
-uint64_t helper_paddsh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- int r = vs.sh[i] + vt.sh[i];
- vs.sh[i] = SATSH(r);
- }
- return vs.d;
-}
-
-uint64_t helper_paddush(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- int r = vs.uh[i] + vt.uh[i];
- vs.uh[i] = SATUH(r);
- }
- return vs.d;
-}
-
-uint64_t helper_paddb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; ++i) {
- vs.ub[i] += vt.ub[i];
- }
- return vs.d;
-}
-
-uint64_t helper_paddh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- vs.uh[i] += vt.uh[i];
- }
- return vs.d;
-}
-
-uint64_t helper_paddw(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 2; ++i) {
- vs.uw[i] += vt.uw[i];
- }
- return vs.d;
-}
-
-uint64_t helper_psubsb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; ++i) {
- int r = vs.sb[i] - vt.sb[i];
- vs.sb[i] = SATSB(r);
- }
- return vs.d;
-}
-
-uint64_t helper_psubusb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; ++i) {
- int r = vs.ub[i] - vt.ub[i];
- vs.ub[i] = SATUB(r);
- }
- return vs.d;
-}
-
-uint64_t helper_psubsh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- int r = vs.sh[i] - vt.sh[i];
- vs.sh[i] = SATSH(r);
- }
- return vs.d;
-}
-
-uint64_t helper_psubush(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- int r = vs.uh[i] - vt.uh[i];
- vs.uh[i] = SATUH(r);
- }
- return vs.d;
-}
-
-uint64_t helper_psubb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; ++i) {
- vs.ub[i] -= vt.ub[i];
- }
- return vs.d;
-}
-
-uint64_t helper_psubh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- vs.uh[i] -= vt.uh[i];
- }
- return vs.d;
-}
-
-uint64_t helper_psubw(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned int i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 2; ++i) {
- vs.uw[i] -= vt.uw[i];
- }
- return vs.d;
-}
-
-uint64_t helper_pshufh(uint64_t fs, uint64_t ft)
-{
- unsigned host = BYTE_ORDER_XOR(3);
- LMIValue vd, vs;
- unsigned i;
-
- vs.d = fs;
- vd.d = 0;
- for (i = 0; i < 4; i++, ft >>= 2) {
- vd.uh[i ^ host] = vs.uh[(ft & 3) ^ host];
- }
- return vd.d;
-}
-
-uint64_t helper_packsswh(uint64_t fs, uint64_t ft)
-{
- uint64_t fd = 0;
- int64_t tmp;
-
- tmp = (int32_t)(fs >> 0);
- tmp = SATSH(tmp);
- fd |= (tmp & 0xffff) << 0;
-
- tmp = (int32_t)(fs >> 32);
- tmp = SATSH(tmp);
- fd |= (tmp & 0xffff) << 16;
-
- tmp = (int32_t)(ft >> 0);
- tmp = SATSH(tmp);
- fd |= (tmp & 0xffff) << 32;
-
- tmp = (int32_t)(ft >> 32);
- tmp = SATSH(tmp);
- fd |= (tmp & 0xffff) << 48;
-
- return fd;
-}
-
-uint64_t helper_packsshb(uint64_t fs, uint64_t ft)
-{
- uint64_t fd = 0;
- unsigned int i;
-
- for (i = 0; i < 4; ++i) {
- int16_t tmp = fs >> (i * 16);
- tmp = SATSB(tmp);
- fd |= (uint64_t)(tmp & 0xff) << (i * 8);
- }
- for (i = 0; i < 4; ++i) {
- int16_t tmp = ft >> (i * 16);
- tmp = SATSB(tmp);
- fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
- }
-
- return fd;
-}
-
-uint64_t helper_packushb(uint64_t fs, uint64_t ft)
-{
- uint64_t fd = 0;
- unsigned int i;
-
- for (i = 0; i < 4; ++i) {
- int16_t tmp = fs >> (i * 16);
- tmp = SATUB(tmp);
- fd |= (uint64_t)(tmp & 0xff) << (i * 8);
- }
- for (i = 0; i < 4; ++i) {
- int16_t tmp = ft >> (i * 16);
- tmp = SATUB(tmp);
- fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
- }
-
- return fd;
-}
-
-uint64_t helper_punpcklwd(uint64_t fs, uint64_t ft)
-{
- return (fs & 0xffffffff) | (ft << 32);
-}
-
-uint64_t helper_punpckhwd(uint64_t fs, uint64_t ft)
-{
- return (fs >> 32) | (ft & ~0xffffffffull);
-}
-
-uint64_t helper_punpcklhw(uint64_t fs, uint64_t ft)
-{
- unsigned host = BYTE_ORDER_XOR(3);
- LMIValue vd, vs, vt;
-
- vs.d = fs;
- vt.d = ft;
- vd.uh[0 ^ host] = vs.uh[0 ^ host];
- vd.uh[1 ^ host] = vt.uh[0 ^ host];
- vd.uh[2 ^ host] = vs.uh[1 ^ host];
- vd.uh[3 ^ host] = vt.uh[1 ^ host];
-
- return vd.d;
-}
-
-uint64_t helper_punpckhhw(uint64_t fs, uint64_t ft)
-{
- unsigned host = BYTE_ORDER_XOR(3);
- LMIValue vd, vs, vt;
-
- vs.d = fs;
- vt.d = ft;
- vd.uh[0 ^ host] = vs.uh[2 ^ host];
- vd.uh[1 ^ host] = vt.uh[2 ^ host];
- vd.uh[2 ^ host] = vs.uh[3 ^ host];
- vd.uh[3 ^ host] = vt.uh[3 ^ host];
-
- return vd.d;
-}
-
-uint64_t helper_punpcklbh(uint64_t fs, uint64_t ft)
-{
- unsigned host = BYTE_ORDER_XOR(7);
- LMIValue vd, vs, vt;
-
- vs.d = fs;
- vt.d = ft;
- vd.ub[0 ^ host] = vs.ub[0 ^ host];
- vd.ub[1 ^ host] = vt.ub[0 ^ host];
- vd.ub[2 ^ host] = vs.ub[1 ^ host];
- vd.ub[3 ^ host] = vt.ub[1 ^ host];
- vd.ub[4 ^ host] = vs.ub[2 ^ host];
- vd.ub[5 ^ host] = vt.ub[2 ^ host];
- vd.ub[6 ^ host] = vs.ub[3 ^ host];
- vd.ub[7 ^ host] = vt.ub[3 ^ host];
-
- return vd.d;
-}
-
-uint64_t helper_punpckhbh(uint64_t fs, uint64_t ft)
-{
- unsigned host = BYTE_ORDER_XOR(7);
- LMIValue vd, vs, vt;
-
- vs.d = fs;
- vt.d = ft;
- vd.ub[0 ^ host] = vs.ub[4 ^ host];
- vd.ub[1 ^ host] = vt.ub[4 ^ host];
- vd.ub[2 ^ host] = vs.ub[5 ^ host];
- vd.ub[3 ^ host] = vt.ub[5 ^ host];
- vd.ub[4 ^ host] = vs.ub[6 ^ host];
- vd.ub[5 ^ host] = vt.ub[6 ^ host];
- vd.ub[6 ^ host] = vs.ub[7 ^ host];
- vd.ub[7 ^ host] = vt.ub[7 ^ host];
-
- return vd.d;
-}
-
-uint64_t helper_pavgh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; i++) {
- vs.uh[i] = (vs.uh[i] + vt.uh[i] + 1) >> 1;
- }
- return vs.d;
-}
-
-uint64_t helper_pavgb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; i++) {
- vs.ub[i] = (vs.ub[i] + vt.ub[i] + 1) >> 1;
- }
- return vs.d;
-}
-
-uint64_t helper_pmaxsh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; i++) {
- vs.sh[i] = (vs.sh[i] >= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pminsh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; i++) {
- vs.sh[i] = (vs.sh[i] <= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pmaxub(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; i++) {
- vs.ub[i] = (vs.ub[i] >= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pminub(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; i++) {
- vs.ub[i] = (vs.ub[i] <= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pcmpeqw(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 2; i++) {
- vs.uw[i] = -(vs.uw[i] == vt.uw[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pcmpgtw(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 2; i++) {
- vs.uw[i] = -(vs.uw[i] > vt.uw[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pcmpeqh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; i++) {
- vs.uh[i] = -(vs.uh[i] == vt.uh[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pcmpgth(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; i++) {
- vs.uh[i] = -(vs.uh[i] > vt.uh[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pcmpeqb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; i++) {
- vs.ub[i] = -(vs.ub[i] == vt.ub[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_pcmpgtb(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; i++) {
- vs.ub[i] = -(vs.ub[i] > vt.ub[i]);
- }
- return vs.d;
-}
-
-uint64_t helper_psllw(uint64_t fs, uint64_t ft)
-{
- LMIValue vs;
- unsigned i;
-
- ft &= 0x7f;
- if (ft > 31) {
- return 0;
- }
- vs.d = fs;
- for (i = 0; i < 2; ++i) {
- vs.uw[i] <<= ft;
- }
- return vs.d;
-}
-
-uint64_t helper_psrlw(uint64_t fs, uint64_t ft)
-{
- LMIValue vs;
- unsigned i;
-
- ft &= 0x7f;
- if (ft > 31) {
- return 0;
- }
- vs.d = fs;
- for (i = 0; i < 2; ++i) {
- vs.uw[i] >>= ft;
- }
- return vs.d;
-}
-
-uint64_t helper_psraw(uint64_t fs, uint64_t ft)
-{
- LMIValue vs;
- unsigned i;
-
- ft &= 0x7f;
- if (ft > 31) {
- ft = 31;
- }
- vs.d = fs;
- for (i = 0; i < 2; ++i) {
- vs.sw[i] >>= ft;
- }
- return vs.d;
-}
-
-uint64_t helper_psllh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs;
- unsigned i;
-
- ft &= 0x7f;
- if (ft > 15) {
- return 0;
- }
- vs.d = fs;
- for (i = 0; i < 4; ++i) {
- vs.uh[i] <<= ft;
- }
- return vs.d;
-}
-
-uint64_t helper_psrlh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs;
- unsigned i;
-
- ft &= 0x7f;
- if (ft > 15) {
- return 0;
- }
- vs.d = fs;
- for (i = 0; i < 4; ++i) {
- vs.uh[i] >>= ft;
- }
- return vs.d;
-}
-
-uint64_t helper_psrah(uint64_t fs, uint64_t ft)
-{
- LMIValue vs;
- unsigned i;
-
- ft &= 0x7f;
- if (ft > 15) {
- ft = 15;
- }
- vs.d = fs;
- for (i = 0; i < 4; ++i) {
- vs.sh[i] >>= ft;
- }
- return vs.d;
-}
-
-uint64_t helper_pmullh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- vs.sh[i] *= vt.sh[i];
- }
- return vs.d;
-}
-
-uint64_t helper_pmulhh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- int32_t r = vs.sh[i] * vt.sh[i];
- vs.sh[i] = r >> 16;
- }
- return vs.d;
-}
-
-uint64_t helper_pmulhuh(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 4; ++i) {
- uint32_t r = vs.uh[i] * vt.uh[i];
- vs.uh[i] = r >> 16;
- }
- return vs.d;
-}
-
-uint64_t helper_pmaddhw(uint64_t fs, uint64_t ft)
-{
- unsigned host = BYTE_ORDER_XOR(3);
- LMIValue vs, vt;
- uint32_t p0, p1;
-
- vs.d = fs;
- vt.d = ft;
- p0 = vs.sh[0 ^ host] * vt.sh[0 ^ host];
- p0 += vs.sh[1 ^ host] * vt.sh[1 ^ host];
- p1 = vs.sh[2 ^ host] * vt.sh[2 ^ host];
- p1 += vs.sh[3 ^ host] * vt.sh[3 ^ host];
-
- return ((uint64_t)p1 << 32) | p0;
-}
-
-uint64_t helper_pasubub(uint64_t fs, uint64_t ft)
-{
- LMIValue vs, vt;
- unsigned i;
-
- vs.d = fs;
- vt.d = ft;
- for (i = 0; i < 8; ++i) {
- int r = vs.ub[i] - vt.ub[i];
- vs.ub[i] = (r < 0 ? -r : r);
- }
- return vs.d;
-}
-
-uint64_t helper_biadd(uint64_t fs)
-{
- unsigned i, fd;
-
- for (i = fd = 0; i < 8; ++i) {
- fd += (fs >> (i * 8)) & 0xff;
- }
- return fd & 0xffff;
-}
-
-uint64_t helper_pmovmskb(uint64_t fs)
-{
- unsigned fd = 0;
-
- fd |= ((fs >> 7) & 1) << 0;
- fd |= ((fs >> 15) & 1) << 1;
- fd |= ((fs >> 23) & 1) << 2;
- fd |= ((fs >> 31) & 1) << 3;
- fd |= ((fs >> 39) & 1) << 4;
- fd |= ((fs >> 47) & 1) << 5;
- fd |= ((fs >> 55) & 1) << 6;
- fd |= ((fs >> 63) & 1) << 7;
-
- return fd & 0xff;
-}
--- /dev/null
+/*
+ * Loongson Multimedia Instruction emulation helpers for QEMU.
+ *
+ * Copyright (c) 2011 Richard Henderson <rth@twiddle.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "qemu/osdep.h"
+#include "cpu.h"
+#include "exec/helper-proto.h"
+
+/*
+ * If the byte ordering doesn't matter, i.e. all columns are treated
+ * identically, then this union can be used directly. If byte ordering
+ * does matter, we generally ignore dumping to memory.
+ */
+typedef union {
+ uint8_t ub[8];
+ int8_t sb[8];
+ uint16_t uh[4];
+ int16_t sh[4];
+ uint32_t uw[2];
+ int32_t sw[2];
+ uint64_t d;
+} LMIValue;
+
+/* Some byte ordering issues can be mitigated by XORing in the following. */
+#ifdef HOST_WORDS_BIGENDIAN
+# define BYTE_ORDER_XOR(N) N
+#else
+# define BYTE_ORDER_XOR(N) 0
+#endif
+
+#define SATSB(x) (x < -0x80 ? -0x80 : x > 0x7f ? 0x7f : x)
+#define SATUB(x) (x > 0xff ? 0xff : x)
+
+#define SATSH(x) (x < -0x8000 ? -0x8000 : x > 0x7fff ? 0x7fff : x)
+#define SATUH(x) (x > 0xffff ? 0xffff : x)
+
+#define SATSW(x) \
+ (x < -0x80000000ll ? -0x80000000ll : x > 0x7fffffff ? 0x7fffffff : x)
+#define SATUW(x) (x > 0xffffffffull ? 0xffffffffull : x)
+
+uint64_t helper_paddsb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.sb[i] + vt.sb[i];
+ vs.sb[i] = SATSB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddusb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] + vt.ub[i];
+ vs.ub[i] = SATUB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.sh[i] + vt.sh[i];
+ vs.sh[i] = SATSH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddush(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.uh[i] + vt.uh[i];
+ vs.uh[i] = SATUH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ vs.ub[i] += vt.ub[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] += vt.uh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_paddw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] += vt.uw[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubsb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.sb[i] - vt.sb[i];
+ vs.sb[i] = SATSB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubusb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] - vt.ub[i];
+ vs.ub[i] = SATUB(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.sh[i] - vt.sh[i];
+ vs.sh[i] = SATSH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubush(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int r = vs.uh[i] - vt.uh[i];
+ vs.uh[i] = SATUH(r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ vs.ub[i] -= vt.ub[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] -= vt.uh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_psubw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned int i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] -= vt.uw[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_pshufh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs;
+ unsigned i;
+
+ vs.d = fs;
+ vd.d = 0;
+ for (i = 0; i < 4; i++, ft >>= 2) {
+ vd.uh[i ^ host] = vs.uh[(ft & 3) ^ host];
+ }
+ return vd.d;
+}
+
+uint64_t helper_packsswh(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ int64_t tmp;
+
+ tmp = (int32_t)(fs >> 0);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 0;
+
+ tmp = (int32_t)(fs >> 32);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 16;
+
+ tmp = (int32_t)(ft >> 0);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 32;
+
+ tmp = (int32_t)(ft >> 32);
+ tmp = SATSH(tmp);
+ fd |= (tmp & 0xffff) << 48;
+
+ return fd;
+}
+
+uint64_t helper_packsshb(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ unsigned int i;
+
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = fs >> (i * 16);
+ tmp = SATSB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8);
+ }
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = ft >> (i * 16);
+ tmp = SATSB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
+ }
+
+ return fd;
+}
+
+uint64_t helper_packushb(uint64_t fs, uint64_t ft)
+{
+ uint64_t fd = 0;
+ unsigned int i;
+
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = fs >> (i * 16);
+ tmp = SATUB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8);
+ }
+ for (i = 0; i < 4; ++i) {
+ int16_t tmp = ft >> (i * 16);
+ tmp = SATUB(tmp);
+ fd |= (uint64_t)(tmp & 0xff) << (i * 8 + 32);
+ }
+
+ return fd;
+}
+
+uint64_t helper_punpcklwd(uint64_t fs, uint64_t ft)
+{
+ return (fs & 0xffffffff) | (ft << 32);
+}
+
+uint64_t helper_punpckhwd(uint64_t fs, uint64_t ft)
+{
+ return (fs >> 32) | (ft & ~0xffffffffull);
+}
+
+uint64_t helper_punpcklhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.uh[0 ^ host] = vs.uh[0 ^ host];
+ vd.uh[1 ^ host] = vt.uh[0 ^ host];
+ vd.uh[2 ^ host] = vs.uh[1 ^ host];
+ vd.uh[3 ^ host] = vt.uh[1 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpckhhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.uh[0 ^ host] = vs.uh[2 ^ host];
+ vd.uh[1 ^ host] = vt.uh[2 ^ host];
+ vd.uh[2 ^ host] = vs.uh[3 ^ host];
+ vd.uh[3 ^ host] = vt.uh[3 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpcklbh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(7);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.ub[0 ^ host] = vs.ub[0 ^ host];
+ vd.ub[1 ^ host] = vt.ub[0 ^ host];
+ vd.ub[2 ^ host] = vs.ub[1 ^ host];
+ vd.ub[3 ^ host] = vt.ub[1 ^ host];
+ vd.ub[4 ^ host] = vs.ub[2 ^ host];
+ vd.ub[5 ^ host] = vt.ub[2 ^ host];
+ vd.ub[6 ^ host] = vs.ub[3 ^ host];
+ vd.ub[7 ^ host] = vt.ub[3 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_punpckhbh(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(7);
+ LMIValue vd, vs, vt;
+
+ vs.d = fs;
+ vt.d = ft;
+ vd.ub[0 ^ host] = vs.ub[4 ^ host];
+ vd.ub[1 ^ host] = vt.ub[4 ^ host];
+ vd.ub[2 ^ host] = vs.ub[5 ^ host];
+ vd.ub[3 ^ host] = vt.ub[5 ^ host];
+ vd.ub[4 ^ host] = vs.ub[6 ^ host];
+ vd.ub[5 ^ host] = vt.ub[6 ^ host];
+ vd.ub[6 ^ host] = vs.ub[7 ^ host];
+ vd.ub[7 ^ host] = vt.ub[7 ^ host];
+
+ return vd.d;
+}
+
+uint64_t helper_pavgh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = (vs.uh[i] + vt.uh[i] + 1) >> 1;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pavgb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = (vs.ub[i] + vt.ub[i] + 1) >> 1;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaxsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.sh[i] = (vs.sh[i] >= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pminsh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.sh[i] = (vs.sh[i] <= vt.sh[i] ? vs.sh[i] : vt.sh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaxub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.ub[i] = (vs.ub[i] >= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pminub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.ub[i] = (vs.ub[i] <= vt.ub[i] ? vs.ub[i] : vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; i++) {
+ vs.uw[i] = -(vs.uw[i] == vt.uw[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgtw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 2; i++) {
+ vs.uw[i] = -(vs.uw[i] > vt.uw[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = -(vs.uh[i] == vt.uh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgth(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; i++) {
+ vs.uh[i] = -(vs.uh[i] > vt.uh[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpeqb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = -(vs.ub[i] == vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_pcmpgtb(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; i++) {
+ vs.ub[i] = -(vs.ub[i] > vt.ub[i]);
+ }
+ return vs.d;
+}
+
+uint64_t helper_psllw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] <<= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrlw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.uw[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psraw(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 31) {
+ ft = 31;
+ }
+ vs.d = fs;
+ for (i = 0; i < 2; ++i) {
+ vs.sw[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psllh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] <<= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrlh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ return 0;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.uh[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_psrah(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs;
+ unsigned i;
+
+ ft &= 0x7f;
+ if (ft > 15) {
+ ft = 15;
+ }
+ vs.d = fs;
+ for (i = 0; i < 4; ++i) {
+ vs.sh[i] >>= ft;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmullh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ vs.sh[i] *= vt.sh[i];
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmulhh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ int32_t r = vs.sh[i] * vt.sh[i];
+ vs.sh[i] = r >> 16;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmulhuh(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 4; ++i) {
+ uint32_t r = vs.uh[i] * vt.uh[i];
+ vs.uh[i] = r >> 16;
+ }
+ return vs.d;
+}
+
+uint64_t helper_pmaddhw(uint64_t fs, uint64_t ft)
+{
+ unsigned host = BYTE_ORDER_XOR(3);
+ LMIValue vs, vt;
+ uint32_t p0, p1;
+
+ vs.d = fs;
+ vt.d = ft;
+ p0 = vs.sh[0 ^ host] * vt.sh[0 ^ host];
+ p0 += vs.sh[1 ^ host] * vt.sh[1 ^ host];
+ p1 = vs.sh[2 ^ host] * vt.sh[2 ^ host];
+ p1 += vs.sh[3 ^ host] * vt.sh[3 ^ host];
+
+ return ((uint64_t)p1 << 32) | p0;
+}
+
+uint64_t helper_pasubub(uint64_t fs, uint64_t ft)
+{
+ LMIValue vs, vt;
+ unsigned i;
+
+ vs.d = fs;
+ vt.d = ft;
+ for (i = 0; i < 8; ++i) {
+ int r = vs.ub[i] - vt.ub[i];
+ vs.ub[i] = (r < 0 ? -r : r);
+ }
+ return vs.d;
+}
+
+uint64_t helper_biadd(uint64_t fs)
+{
+ unsigned i, fd;
+
+ for (i = fd = 0; i < 8; ++i) {
+ fd += (fs >> (i * 8)) & 0xff;
+ }
+ return fd & 0xffff;
+}
+
+uint64_t helper_pmovmskb(uint64_t fs)
+{
+ unsigned fd = 0;
+
+ fd |= ((fs >> 7) & 1) << 0;
+ fd |= ((fs >> 15) & 1) << 1;
+ fd |= ((fs >> 23) & 1) << 2;
+ fd |= ((fs >> 31) & 1) << 3;
+ fd |= ((fs >> 39) & 1) << 4;
+ fd |= ((fs >> 47) & 1) << 5;
+ fd |= ((fs >> 55) & 1) << 6;
+ fd |= ((fs >> 63) & 1) << 7;
+
+ return fd & 0xff;
+}
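
The helpers above implement per-lane saturating arithmetic for the Loongson multimedia instructions. As a standalone sketch (not part of the patch), the following shows one byte lane of the clamping that helper_paddsb applies across all eight lanes via SATSB:

#include <stdint.h>
#include <stdio.h>

/* One signed-byte lane of a saturating add: widen to int, then clamp. */
static int8_t sat_add_s8(int8_t a, int8_t b)
{
    int r = a + b;                                   /* no overflow in int */
    return r < -0x80 ? -0x80 : r > 0x7f ? 0x7f : r;  /* same clamp as SATSB */
}

int main(void)
{
    /* 0x70 + 0x70 = 224 saturates to 127 instead of wrapping to -32. */
    printf("%d\n", sat_add_s8(0x70, 0x70));
    return 0;
}
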
* ------------------------------------------------
*/
/*
- * bits 0-31: MIPS base instruction sets
+ * bits 0-23: MIPS base instruction sets
*/
#define ISA_MIPS1 0x0000000000000001ULL
#define ISA_MIPS2 0x0000000000000002ULL
#define ISA_MIPS64R6 0x0000000000004000ULL
#define ISA_NANOMIPS32 0x0000000000008000ULL
/*
- * bits 32-47: MIPS ASEs
+ * bits 24-39: MIPS ASEs
*/
-#define ASE_MIPS16 0x0000000100000000ULL
-#define ASE_MIPS3D 0x0000000200000000ULL
-#define ASE_MDMX 0x0000000400000000ULL
-#define ASE_DSP 0x0000000800000000ULL
-#define ASE_DSP_R2 0x0000001000000000ULL
-#define ASE_DSP_R3 0x0000002000000000ULL
-#define ASE_MT 0x0000004000000000ULL
-#define ASE_SMARTMIPS 0x0000008000000000ULL
-#define ASE_MICROMIPS 0x0000010000000000ULL
-#define ASE_MSA 0x0000020000000000ULL
+#define ASE_MIPS16 0x0000000001000000ULL
+#define ASE_MIPS3D 0x0000000002000000ULL
+#define ASE_MDMX 0x0000000004000000ULL
+#define ASE_DSP 0x0000000008000000ULL
+#define ASE_DSP_R2 0x0000000010000000ULL
+#define ASE_DSP_R3 0x0000000020000000ULL
+#define ASE_MT 0x0000000040000000ULL
+#define ASE_SMARTMIPS 0x0000000080000000ULL
+#define ASE_MICROMIPS 0x0000000100000000ULL
+#define ASE_MSA 0x0000000200000000ULL
/*
- * bits 48-55: vendor-specific base instruction sets
+ * bits 40-51: vendor-specific base instruction sets
*/
-#define INSN_LOONGSON2E 0x0001000000000000ULL
-#define INSN_LOONGSON2F 0x0002000000000000ULL
-#define INSN_VR54XX 0x0004000000000000ULL
-#define INSN_R5900 0x0008000000000000ULL
+#define INSN_VR54XX 0x0000010000000000ULL
+#define INSN_R5900 0x0000020000000000ULL
+#define INSN_LOONGSON2E 0x0000040000000000ULL
+#define INSN_LOONGSON2F 0x0000080000000000ULL
+#define INSN_LOONGSON3A 0x0000100000000000ULL
/*
- * bits 56-63: vendor-specific ASEs
+ * bits 52-63: vendor-specific ASEs
*/
-#define ASE_MMI 0x0100000000000000ULL
-#define ASE_MXU 0x0200000000000000ULL
+#define ASE_MMI 0x0010000000000000ULL
+#define ASE_MXU 0x0020000000000000ULL
+#define ASE_LMMI 0x0040000000000000ULL
+#define ASE_LEXT 0x0080000000000000ULL
/* MIPS CPU defines. */
#define CPU_MIPS1 (ISA_MIPS1)
/* Wave Computing: "nanoMIPS" */
#define CPU_NANOMIPS32 (CPU_MIPS32R6 | ISA_NANOMIPS32)
+#define CPU_LOONGSON3A (CPU_MIPS64R2 | INSN_LOONGSON3A)
+
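A CPU model's insn_flags word is simply the OR of one base ISA level with any ASE and vendor bits defined above, and the decoder gates instructions with a mask test. A minimal illustration, assuming only the #defines in this header (the helper name here is hypothetical, not QEMU's check_insn API):

#include <stdint.h>
#include <stdbool.h>

/* True if every bit in 'required' is present in 'insn_flags'. */
static bool has_feature(uint64_t insn_flags, uint64_t required)
{
    return (insn_flags & required) == required;
}

/* e.g. has_feature(CPU_LOONGSON3A | ASE_LMMI, ASE_LMMI) is true, while
 * has_feature(CPU_LOONGSON3A, ASE_MMI) is false, since CPU_MIPS64R2
 * contributes only base-ISA bits. */
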
/*
* Strictly follow the architecture standard:
* - Disallow "special" instruction handling for PMON/SPIM.
env->CP0_Config5 = env->cpu_model->CP0_Config5;
env->CP0_Config5_rw_bitmask = env->cpu_model->CP0_Config5_rw_bitmask;
env->CP0_Config6 = env->cpu_model->CP0_Config6;
+ env->CP0_Config6_rw_bitmask = env->cpu_model->CP0_Config6_rw_bitmask;
env->CP0_Config7 = env->cpu_model->CP0_Config7;
+ env->CP0_Config7_rw_bitmask = env->cpu_model->CP0_Config7_rw_bitmask;
env->CP0_LLAddr_rw_bitmask = env->cpu_model->CP0_LLAddr_rw_bitmask
<< env->cpu_model->CP0_LLAddr_shift;
env->CP0_LLAddr_shift = env->cpu_model->CP0_LLAddr_shift;
.insn_flags = CPU_LOONGSON2F,
.mmu_type = MMU_TYPE_R4000,
},
+ {
+ .name = "Loongson-3A1000",
+ .CP0_PRid = 0x6305,
+ /* 64KB I-cache and d-cache. 4 way with 32 byte cache line size. */
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (3 << CP0C1_IS) | (4 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (3 << CP0C1_DS) | (4 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2 | (7 << CP0C2_SS) | (4 << CP0C2_SL) |
+ (3 << CP0C2_SA),
+ .CP0_Config3 = MIPS_CONFIG3 | (1 << CP0C3_LPA),
+ .CP0_LLAddr_rw_bitmask = 0,
+ .SYNCI_Step = 32,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x74D8FFFF,
+ .CP0_PageGrain = (1 << CP0PG_ELPA),
+ .CP0_PageGrain_rw_bitmask = (1 << CP0PG_ELPA),
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV) | (0x1 << FCR0_F64) |
+ (0x1 << FCR0_PS) | (0x1 << FCR0_L) | (0x1 << FCR0_W) |
+ (0x1 << FCR0_D) | (0x1 << FCR0_S),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 42,
+ .PABITS = 48,
+ .insn_flags = CPU_LOONGSON3A,
+ .mmu_type = MMU_TYPE_R4000,
+ },
+ {
+ .name = "Loongson-3A4000",
+ .CP0_PRid = 0x14C000,
+ /* 64KB I-cache and d-cache. 4 way with 64 byte cache line size. */
+ .CP0_Config0 = MIPS_CONFIG0 | (0x1 << CP0C0_AR) | (0x2 << CP0C0_AT) |
+ (MMU_TYPE_R4000 << CP0C0_MT),
+ .CP0_Config1 = MIPS_CONFIG1 | (1 << CP0C1_FP) | (63 << CP0C1_MMU) |
+ (2 << CP0C1_IS) | (5 << CP0C1_IL) | (3 << CP0C1_IA) |
+ (2 << CP0C1_DS) | (5 << CP0C1_DL) | (3 << CP0C1_DA) |
+ (1 << CP0C1_PC) | (1 << CP0C1_WR) | (1 << CP0C1_EP),
+ .CP0_Config2 = MIPS_CONFIG2 | (5 << CP0C2_SS) | (5 << CP0C2_SL) |
+ (15 << CP0C2_SA),
+ .CP0_Config3 = MIPS_CONFIG3 | (1U << CP0C3_M) | (1 << CP0C3_MSAP) |
+ (1 << CP0C3_BP) | (1 << CP0C3_BI) | (1 << CP0C3_ULRI) |
+ (1 << CP0C3_RXI) | (1 << CP0C3_LPA) | (1 << CP0C3_VInt),
+ .CP0_Config4 = MIPS_CONFIG4 | (1U << CP0C4_M) | (2 << CP0C4_IE) |
+ (1 << CP0C4_AE) | (0x1c << CP0C4_KScrExist),
+ .CP0_Config4_rw_bitmask = 0,
+ .CP0_Config5 = MIPS_CONFIG5 | (1 << CP0C5_CRCP) | (1 << CP0C5_NFExists),
+ .CP0_Config5_rw_bitmask = (1 << CP0C5_K) | (1 << CP0C5_CV) |
+ (1 << CP0C5_MSAEn) | (1 << CP0C5_UFE) |
+ (1 << CP0C5_FRE) | (1 << CP0C5_SBRI),
+ .CP0_Config6 = (1 << CP0C6_VCLRU) | (1 << CP0C6_DCLRU) |
+ (1 << CP0C6_SFBEN) | (1 << CP0C6_VLTINT) |
+ (1 << CP0C6_INSTPREF) | (1 << CP0C6_DATAPREF),
+ .CP0_Config6_rw_bitmask = (1 << CP0C6_BPPASS) | (0x3f << CP0C6_KPOS) |
+ (1 << CP0C6_KE) | (1 << CP0C6_VTLBONLY) |
+ (1 << CP0C6_LASX) | (1 << CP0C6_SSEN) |
+ (1 << CP0C6_DISDRTIME) | (1 << CP0C6_PIXNUEN) |
+ (1 << CP0C6_SCRAND) | (1 << CP0C6_LLEXCEN) |
+ (1 << CP0C6_DISVC) | (1 << CP0C6_VCLRU) |
+ (1 << CP0C6_DCLRU) | (1 << CP0C6_PIXUEN) |
+ (1 << CP0C6_DISBLKLYEN) | (1 << CP0C6_UMEMUALEN) |
+ (1 << CP0C6_SFBEN) | (1 << CP0C6_FLTINT) |
+ (1 << CP0C6_VLTINT) | (1 << CP0C6_DISBTB) |
+ (3 << CP0C6_STPREFCTL) | (1 << CP0C6_INSTPREF) |
+ (1 << CP0C6_DATAPREF),
+ .CP0_Config7 = 0,
+ .CP0_Config7_rw_bitmask = (1 << CP0C7_NAPCGEN) | (1 << CP0C7_UNIMUEN) |
+ (1 << CP0C7_VFPUCGEN),
+ .CP0_LLAddr_rw_bitmask = 1,
+ .SYNCI_Step = 16,
+ .CCRes = 2,
+ .CP0_Status_rw_bitmask = 0x7DDBFFFF,
+ .CP0_PageGrain = (1 << CP0PG_ELPA),
+ .CP0_PageGrain_rw_bitmask = (1U << CP0PG_RIE) | (1 << CP0PG_XIE) |
+ (1 << CP0PG_ELPA) | (1 << CP0PG_IEC),
+ .CP1_fcr0 = (0x5 << FCR0_PRID) | (0x1 << FCR0_REV) | (0x1 << FCR0_F64) |
+ (0x1 << FCR0_PS) | (0x1 << FCR0_L) | (0x1 << FCR0_W) |
+ (0x1 << FCR0_D) | (0x1 << FCR0_S),
+ .CP1_fcr31 = 0,
+ .CP1_fcr31_rw_bitmask = 0xFF83FFFF,
+ .SEGBITS = 48,
+ .PABITS = 48,
+ .insn_flags = CPU_LOONGSON3A,
+ .mmu_type = MMU_TYPE_R4000,
+ },
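
The cache comments above follow from the architected Config1 encodings (sets per way = 64 << IS, line size = 2^(IL+1) bytes when IL is non-zero, ways = IA + 1). A small self-contained check, using the Loongson-3A4000 I-cache field values as an example:

#include <stdio.h>

int main(void)
{
    unsigned is = 2, il = 5, ia = 3;        /* CP0C1_IS/IL/IA for the 3A4000 */
    unsigned sets = 64u << is;              /* 64 * 2^IS sets per way        */
    unsigned line = il ? 2u << il : 0;      /* 2^(IL+1)-byte cache lines     */
    unsigned ways = ia + 1;                 /* IA encodes associativity - 1  */
    /* 256 sets * 64-byte lines * 4 ways = 64 KiB */
    printf("%u KiB\n", sets * line * ways / 1024);
    return 0;
}
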
{
/* A generic CPU providing MIPS64 DSP R2 ASE features.
FIXME: Eventually this should be replaced by a real CPU model. */