author     bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2005-04-23 18:16:07 +0000
committer  bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>  2005-04-23 18:16:07 +0000
commit     111bfab3b5c6a47a7182e095647e6a5e0e17feb8 (patch)
tree       7ff41315c47fc137d03a92d42acc7333965544f0
parent     c7d344af8fb308975f941de1640b917e1b085a81 (diff)
This patch adds little-endian mode support to PPC emulation.
This is needed by OS/2 and Windows NT and some programs like VirtualPC.
This patch has been tested using OS/2 bootloader (thanks to Tero Kaarlela).

(Jocelyn Mayer)

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@1379 c046a42c-6fe2-441c-8c8c-71466251a162
-rw-r--r--  cpu-exec.c                  |   3
-rw-r--r--  disas.c                     |   2
-rw-r--r--  target-ppc/op_helper_mem.h  |  49
-rw-r--r--  target-ppc/op_mem.h         | 180
-rw-r--r--  target-ppc/translate.c      | 124
5 files changed, 331 insertions(+), 27 deletions(-)
diff --git a/cpu-exec.c b/cpu-exec.c
index 59f127747..c83db7d64 100644
--- a/cpu-exec.c
+++ b/cpu-exec.c
@@ -364,7 +364,8 @@ int cpu_exec(CPUState *env1)
cs_base = env->npc;
pc = env->pc;
#elif defined(TARGET_PPC)
- flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) | (msr_se << MSR_SE);
+ flags = (msr_pr << MSR_PR) | (msr_fp << MSR_FP) |
+ (msr_se << MSR_SE) | (msr_le << MSR_LE);
cs_base = 0;
pc = env->nip;
#else
diff --git a/disas.c b/disas.c
index 7005716c1..8754713a8 100644
--- a/disas.c
+++ b/disas.c
@@ -141,6 +141,8 @@ void target_disas(FILE *out, target_ulong code, target_ulong size, int flags)
#elif defined(TARGET_SPARC)
print_insn = print_insn_sparc;
#elif defined(TARGET_PPC)
+ if (cpu_single_env->msr[MSR_LE])
+ disasm_info.endian = BFD_ENDIAN_LITTLE;
print_insn = print_insn_ppc;
#else
fprintf(out, "0x" TARGET_FMT_lx
diff --git a/target-ppc/op_helper_mem.h b/target-ppc/op_helper_mem.h
index 85ac91163..fa7f07676 100644
--- a/target-ppc/op_helper_mem.h
+++ b/target-ppc/op_helper_mem.h
@@ -40,4 +40,53 @@ void glue(do_stsw, MEMSUFFIX) (int src)
}
}
+void glue(do_lsw_le, MEMSUFFIX) (int dst)
+{
+ uint32_t tmp;
+ int sh;
+
+ if (loglevel > 0) {
+ fprintf(logfile, "%s: addr=0x%08x count=%d reg=%d\n",
+ __func__, T0, T1, dst);
+ }
+ for (; T1 > 3; T1 -= 4, T0 += 4) {
+ tmp = glue(ldl, MEMSUFFIX)(T0);
+ ugpr(dst++) = ((tmp & 0xFF000000) >> 24) | ((tmp & 0x00FF0000) >> 8) |
+ ((tmp & 0x0000FF00) << 8) | ((tmp & 0x000000FF) << 24);
+ if (dst == 32)
+ dst = 0;
+ }
+ if (T1 > 0) {
+ tmp = 0;
+ for (sh = 0; T1 > 0; T1--, T0++, sh += 8) {
+ tmp |= glue(ldub, MEMSUFFIX)(T0) << sh;
+ }
+ ugpr(dst) = tmp;
+ }
+}
+
+void glue(do_stsw_le, MEMSUFFIX) (int src)
+{
+ uint32_t tmp;
+ int sh;
+
+ if (loglevel > 0) {
+ fprintf(logfile, "%s: addr=0x%08x count=%d reg=%d\n",
+ __func__, T0, T1, src);
+ }
+ for (; T1 > 3; T1 -= 4, T0 += 4) {
+ tmp = ((ugpr(src++) & 0xFF000000) >> 24);
+ tmp |= ((ugpr(src++) & 0x00FF0000) >> 8);
+ tmp |= ((ugpr(src++) & 0x0000FF00) << 8);
+ tmp |= ((ugpr(src++) & 0x000000FF) << 24);
+ glue(stl, MEMSUFFIX)(T0, tmp);
+ if (src == 32)
+ src = 0;
+ }
+ if (T1 > 0) {
+ for (sh = 0; T1 > 0; T1--, T0++, sh += 8)
+ glue(stb, MEMSUFFIX)(T0, (ugpr(src) >> sh) & 0xFF);
+ }
+}
+
#undef MEMSUFFIX
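
The do_lsw_le/do_stsw_le helpers above reverse each 32-bit word with an explicit shift-and-mask pattern. As a minimal standalone sketch (not part of the patch; the helper name bswap32 is hypothetical), the same reversal can be written once and reused:

    #include <stdint.h>

    /* Reverse the byte order of a 32-bit value; this is the same
     * shift-and-mask pattern used inline by do_lsw_le/do_stsw_le. */
    static inline uint32_t bswap32(uint32_t x)
    {
        return ((x & 0xFF000000u) >> 24) |
               ((x & 0x00FF0000u) >>  8) |
               ((x & 0x0000FF00u) <<  8) |
               ((x & 0x000000FFu) << 24);
    }

With such a helper, a little-endian word load is just bswap32 applied to the result of the big-endian ldl access, which is exactly what the ld32r routine in op_mem.h implements.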
diff --git a/target-ppc/op_mem.h b/target-ppc/op_mem.h
index f0f0cd1b3..9b3f721e5 100644
--- a/target-ppc/op_mem.h
+++ b/target-ppc/op_mem.h
@@ -8,6 +8,12 @@ static inline uint16_t glue(ld16r, MEMSUFFIX) (target_ulong EA)
return ((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8);
}
+static inline int32_t glue(ld16rs, MEMSUFFIX) (target_ulong EA)
+{
+ int16_t tmp = glue(lduw, MEMSUFFIX)(EA);
+ return ((tmp & 0xFF00) >> 8) | ((tmp & 0x00FF) << 8);
+}
+
static inline uint32_t glue(ld32r, MEMSUFFIX) (target_ulong EA)
{
uint32_t tmp = glue(ldl, MEMSUFFIX)(EA);
@@ -48,17 +54,29 @@ PPC_LD_OP(ha, ldsw);
PPC_LD_OP(hz, lduw);
PPC_LD_OP(wz, ldl);
+PPC_LD_OP(ha_le, ld16rs);
+PPC_LD_OP(hz_le, ld16r);
+PPC_LD_OP(wz_le, ld32r);
+
/*** Integer store ***/
PPC_ST_OP(b, stb);
PPC_ST_OP(h, stw);
PPC_ST_OP(w, stl);
+PPC_ST_OP(h_le, st16r);
+PPC_ST_OP(w_le, st32r);
+
/*** Integer load and store with byte reverse ***/
PPC_LD_OP(hbr, ld16r);
PPC_LD_OP(wbr, ld32r);
PPC_ST_OP(hbr, st16r);
PPC_ST_OP(wbr, st32r);
+PPC_LD_OP(hbr_le, lduw);
+PPC_LD_OP(wbr_le, ldl);
+PPC_ST_OP(hbr_le, stw);
+PPC_ST_OP(wbr_le, stl);
+
/*** Integer load and store multiple ***/
PPC_OP(glue(lmw, MEMSUFFIX))
{
@@ -80,6 +98,26 @@ PPC_OP(glue(stmw, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(lmw_le, MEMSUFFIX))
+{
+ int dst = PARAM(1);
+
+ for (; dst < 32; dst++, T0 += 4) {
+ ugpr(dst) = glue(ld32r, MEMSUFFIX)(T0);
+ }
+ RETURN();
+}
+
+PPC_OP(glue(stmw_le, MEMSUFFIX))
+{
+ int src = PARAM(1);
+
+ for (; src < 32; src++, T0 += 4) {
+ glue(st32r, MEMSUFFIX)(T0, ugpr(src));
+ }
+ RETURN();
+}
+
/*** Integer load and store strings ***/
PPC_OP(glue(lswi, MEMSUFFIX))
{
@@ -87,6 +125,13 @@ PPC_OP(glue(lswi, MEMSUFFIX))
RETURN();
}
+void glue(do_lsw_le, MEMSUFFIX) (int dst);
+PPC_OP(glue(lswi_le, MEMSUFFIX))
+{
+ glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+ RETURN();
+}
+
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
* In an other hand, IBM says this is valid, but rA won't be loaded.
@@ -105,12 +150,32 @@ PPC_OP(glue(lswx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(lswx_le, MEMSUFFIX))
+{
+ if (T1 > 0) {
+ if ((PARAM(1) < PARAM(2) && (PARAM(1) + T1) > PARAM(2)) ||
+ (PARAM(1) < PARAM(3) && (PARAM(1) + T1) > PARAM(3))) {
+ do_raise_exception_err(EXCP_PROGRAM, EXCP_INVAL | EXCP_INVAL_LSWX);
+ } else {
+ glue(do_lsw_le, MEMSUFFIX)(PARAM(1));
+ }
+ }
+ RETURN();
+}
+
PPC_OP(glue(stsw, MEMSUFFIX))
{
glue(do_stsw, MEMSUFFIX)(PARAM(1));
RETURN();
}
+void glue(do_stsw_le, MEMSUFFIX) (int src);
+PPC_OP(glue(stsw_le, MEMSUFFIX))
+{
+ glue(do_stsw_le, MEMSUFFIX)(PARAM(1));
+ RETURN();
+}
+
/*** Floating-point store ***/
#define PPC_STF_OP(name, op) \
PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
@@ -122,6 +187,43 @@ PPC_OP(glue(glue(st, name), MEMSUFFIX)) \
PPC_STF_OP(fd, stfq);
PPC_STF_OP(fs, stfl);
+static inline void glue(stfqr, MEMSUFFIX) (target_ulong EA, double d)
+{
+ union {
+ double d;
+ uint64_t u;
+ } u;
+
+ u.d = d;
+ u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
+ ((u.u & 0x00FF000000000000ULL) >> 40) |
+ ((u.u & 0x0000FF0000000000ULL) >> 24) |
+ ((u.u & 0x000000FF00000000ULL) >> 8) |
+ ((u.u & 0x00000000FF000000ULL) << 8) |
+ ((u.u & 0x0000000000FF0000ULL) << 24) |
+ ((u.u & 0x000000000000FF00ULL) << 40) |
+ ((u.u & 0x00000000000000FFULL) << 56);
+ glue(stfq, MEMSUFFIX)(EA, u.d);
+}
+
+static inline void glue(stflr, MEMSUFFIX) (target_ulong EA, float f)
+{
+ union {
+ float f;
+ uint32_t u;
+ } u;
+
+ u.f = f;
+ u.u = ((u.u & 0xFF000000UL) >> 24) |
+ ((u.u & 0x00FF0000ULL) >> 8) |
+ ((u.u & 0x0000FF00UL) << 8) |
+ ((u.u & 0x000000FFULL) << 24);
+ glue(stfl, MEMSUFFIX)(EA, u.f);
+}
+
+PPC_STF_OP(fd_le, stfqr);
+PPC_STF_OP(fs_le, stflr);
+
/*** Floating-point load ***/
#define PPC_LDF_OP(name, op) \
PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
@@ -133,6 +235,45 @@ PPC_OP(glue(glue(l, name), MEMSUFFIX)) \
PPC_LDF_OP(fd, ldfq);
PPC_LDF_OP(fs, ldfl);
+static inline double glue(ldfqr, MEMSUFFIX) (target_ulong EA)
+{
+ union {
+ double d;
+ uint64_t u;
+ } u;
+
+ u.d = glue(ldfq, MEMSUFFIX)(EA);
+ u.u = ((u.u & 0xFF00000000000000ULL) >> 56) |
+ ((u.u & 0x00FF000000000000ULL) >> 40) |
+ ((u.u & 0x0000FF0000000000ULL) >> 24) |
+ ((u.u & 0x000000FF00000000ULL) >> 8) |
+ ((u.u & 0x00000000FF000000ULL) << 8) |
+ ((u.u & 0x0000000000FF0000ULL) << 24) |
+ ((u.u & 0x000000000000FF00ULL) << 40) |
+ ((u.u & 0x00000000000000FFULL) << 56);
+
+ return u.d;
+}
+
+static inline float glue(ldflr, MEMSUFFIX) (target_ulong EA)
+{
+ union {
+ float f;
+ uint32_t u;
+ } u;
+
+ u.f = glue(ldfl, MEMSUFFIX)(EA);
+ u.u = ((u.u & 0xFF000000UL) >> 24) |
+ ((u.u & 0x00FF0000ULL) >> 8) |
+ ((u.u & 0x0000FF00UL) << 8) |
+ ((u.u & 0x000000FFULL) << 24);
+
+ return u.f;
+}
+
+PPC_LDF_OP(fd_le, ldfqr);
+PPC_LDF_OP(fs_le, ldflr);
+
/* Load and set reservation */
PPC_OP(glue(lwarx, MEMSUFFIX))
{
@@ -145,6 +286,17 @@ PPC_OP(glue(lwarx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(lwarx_le, MEMSUFFIX))
+{
+ if (T0 & 0x03) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ T1 = glue(ld32r, MEMSUFFIX)(T0);
+ regs->reserve = T0;
+ }
+ RETURN();
+}
+
/* Store with reservation */
PPC_OP(glue(stwcx, MEMSUFFIX))
{
@@ -162,6 +314,22 @@ PPC_OP(glue(stwcx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(stwcx_le, MEMSUFFIX))
+{
+ if (T0 & 0x03) {
+ do_raise_exception(EXCP_ALIGN);
+ } else {
+ if (regs->reserve != T0) {
+ env->crf[0] = xer_ov;
+ } else {
+ glue(st32r, MEMSUFFIX)(T0, T1);
+ env->crf[0] = xer_ov | 0x02;
+ }
+ }
+ regs->reserve = 0;
+ RETURN();
+}
+
PPC_OP(glue(dcbz, MEMSUFFIX))
{
glue(stl, MEMSUFFIX)(T0 + 0x00, 0);
@@ -188,4 +356,16 @@ PPC_OP(glue(ecowx, MEMSUFFIX))
RETURN();
}
+PPC_OP(glue(eciwx_le, MEMSUFFIX))
+{
+ T1 = glue(ld32r, MEMSUFFIX)(T0);
+ RETURN();
+}
+
+PPC_OP(glue(ecowx_le, MEMSUFFIX))
+{
+ glue(st32r, MEMSUFFIX)(T0, T1);
+ RETURN();
+}
+
#undef MEMSUFFIX
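
The stfqr/ldfqr and stflr/ldflr routines above byte-swap the raw IEEE-754 image of a double or float through a union before storing it or after loading it. A condensed standalone sketch of the 64-bit case (not part of the patch; bswap_double is a hypothetical name, and memcpy stands in for the union):

    #include <stdint.h>
    #include <string.h>

    /* Swap the byte order of a double by swapping its 64-bit bit image. */
    static inline double bswap_double(double d)
    {
        uint64_t u;

        memcpy(&u, &d, sizeof(u));
        u = ((u & 0xFF00000000000000ULL) >> 56) |
            ((u & 0x00FF000000000000ULL) >> 40) |
            ((u & 0x0000FF0000000000ULL) >> 24) |
            ((u & 0x000000FF00000000ULL) >>  8) |
            ((u & 0x00000000FF000000ULL) <<  8) |
            ((u & 0x0000000000FF0000ULL) << 24) |
            ((u & 0x000000000000FF00ULL) << 40) |
            ((u & 0x00000000000000FFULL) << 56);
        memcpy(&d, &u, sizeof(d));
        return d;
    }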
diff --git a/target-ppc/translate.c b/target-ppc/translate.c
index 6cfbcc2b2..7d2e62b95 100644
--- a/target-ppc/translate.c
+++ b/target-ppc/translate.c
@@ -1046,22 +1046,41 @@ GEN_HANDLER(mtfsfi, 0x3F, 0x06, 0x04, 0x006f0800, PPC_FLOAT)
}
/*** Integer load ***/
+#define op_ldst(name) (*gen_op_##name[ctx->mem_idx])()
#if defined(CONFIG_USER_ONLY)
-#define op_ldst(name) gen_op_##name##_raw()
-#define OP_LD_TABLE(width)
-#define OP_ST_TABLE(width)
+#define OP_LD_TABLE(width) \
+static GenOpFunc *gen_op_l##width[] = { \
+ &gen_op_l##width##_raw, \
+ &gen_op_l##width##_le_raw, \
+};
+#define OP_ST_TABLE(width) \
+static GenOpFunc *gen_op_st##width[] = { \
+ &gen_op_st##width##_raw, \
+ &gen_op_st##width##_le_raw, \
+};
+/* Byte access routine are endian safe */
+#define gen_op_stb_le_raw gen_op_stb_raw
+#define gen_op_lbz_le_raw gen_op_lbz_raw
#else
-#define op_ldst(name) (*gen_op_##name[ctx->mem_idx])()
#define OP_LD_TABLE(width) \
static GenOpFunc *gen_op_l##width[] = { \
&gen_op_l##width##_user, \
+ &gen_op_l##width##_le_user, \
&gen_op_l##width##_kernel, \
-}
+ &gen_op_l##width##_le_kernel, \
+};
#define OP_ST_TABLE(width) \
static GenOpFunc *gen_op_st##width[] = { \
&gen_op_st##width##_user, \
+ &gen_op_st##width##_le_user, \
&gen_op_st##width##_kernel, \
-}
+ &gen_op_st##width##_le_kernel, \
+};
+/* Byte access routine are endian safe */
+#define gen_op_stb_le_user gen_op_stb_user
+#define gen_op_lbz_le_user gen_op_lbz_user
+#define gen_op_stb_le_kernel gen_op_stb_kernel
+#define gen_op_lbz_le_kernel gen_op_lbz_kernel
#endif
#define GEN_LD(width, opc) \
@@ -1232,17 +1251,28 @@ OP_ST_TABLE(wbr);
GEN_STX(wbr, 0x16, 0x14);
/*** Integer load and store multiple ***/
+#define op_ldstm(name, reg) (*gen_op_##name[ctx->mem_idx])(reg)
#if defined(CONFIG_USER_ONLY)
-#define op_ldstm(name, reg) gen_op_##name##_raw(reg)
+static GenOpFunc1 *gen_op_lmw[] = {
+ &gen_op_lmw_raw,
+ &gen_op_lmw_le_raw,
+};
+static GenOpFunc1 *gen_op_stmw[] = {
+ &gen_op_stmw_raw,
+ &gen_op_stmw_le_raw,
+};
#else
-#define op_ldstm(name, reg) (*gen_op_##name[ctx->mem_idx])(reg)
static GenOpFunc1 *gen_op_lmw[] = {
&gen_op_lmw_user,
+ &gen_op_lmw_le_user,
&gen_op_lmw_kernel,
+ &gen_op_lmw_le_kernel,
};
static GenOpFunc1 *gen_op_stmw[] = {
&gen_op_stmw_user,
+ &gen_op_stmw_le_user,
&gen_op_stmw_kernel,
+ &gen_op_stmw_le_kernel,
};
#endif
@@ -1277,23 +1307,39 @@ GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
}
/*** Integer load and store strings ***/
-#if defined(CONFIG_USER_ONLY)
-#define op_ldsts(name, start) gen_op_##name##_raw(start)
-#define op_ldstsx(name, rd, ra, rb) gen_op_##name##_raw(rd, ra, rb)
-#else
#define op_ldsts(name, start) (*gen_op_##name[ctx->mem_idx])(start)
#define op_ldstsx(name, rd, ra, rb) (*gen_op_##name[ctx->mem_idx])(rd, ra, rb)
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc1 *gen_op_lswi[] = {
+ &gen_op_lswi_raw,
+ &gen_op_lswi_le_raw,
+};
+static GenOpFunc3 *gen_op_lswx[] = {
+ &gen_op_lswx_raw,
+ &gen_op_lswx_le_raw,
+};
+static GenOpFunc1 *gen_op_stsw[] = {
+ &gen_op_stsw_raw,
+ &gen_op_stsw_le_raw,
+};
+#else
static GenOpFunc1 *gen_op_lswi[] = {
&gen_op_lswi_user,
+ &gen_op_lswi_le_user,
&gen_op_lswi_kernel,
+ &gen_op_lswi_le_kernel,
};
static GenOpFunc3 *gen_op_lswx[] = {
&gen_op_lswx_user,
+ &gen_op_lswx_le_user,
&gen_op_lswx_kernel,
+ &gen_op_lswx_le_kernel,
};
static GenOpFunc1 *gen_op_stsw[] = {
&gen_op_stsw_user,
+ &gen_op_stsw_le_user,
&gen_op_stsw_kernel,
+ &gen_op_stsw_le_kernel,
};
#endif
@@ -1389,23 +1435,33 @@ GEN_HANDLER(isync, 0x13, 0x16, 0xFF, 0x03FF0801, PPC_MEM)
{
}
-/* lwarx */
+#define op_lwarx() (*gen_op_lwarx[ctx->mem_idx])()
+#define op_stwcx() (*gen_op_stwcx[ctx->mem_idx])()
#if defined(CONFIG_USER_ONLY)
-#define op_lwarx() gen_op_lwarx_raw()
-#define op_stwcx() gen_op_stwcx_raw()
+static GenOpFunc *gen_op_lwarx[] = {
+ &gen_op_lwarx_raw,
+ &gen_op_lwarx_le_raw,
+};
+static GenOpFunc *gen_op_stwcx[] = {
+ &gen_op_stwcx_raw,
+ &gen_op_stwcx_le_raw,
+};
#else
-#define op_lwarx() (*gen_op_lwarx[ctx->mem_idx])()
static GenOpFunc *gen_op_lwarx[] = {
&gen_op_lwarx_user,
+ &gen_op_lwarx_le_user,
&gen_op_lwarx_kernel,
+ &gen_op_lwarx_le_kernel,
};
-#define op_stwcx() (*gen_op_stwcx[ctx->mem_idx])()
static GenOpFunc *gen_op_stwcx[] = {
&gen_op_stwcx_user,
+ &gen_op_stwcx_le_user,
&gen_op_stwcx_kernel,
+ &gen_op_stwcx_le_kernel,
};
#endif
+/* lwarx */
GEN_HANDLER(lwarx, 0x1F, 0x14, 0xFF, 0x00000001, PPC_RES)
{
if (rA(ctx->opcode) == 0) {
@@ -2498,23 +2554,33 @@ GEN_HANDLER(tlbsync, 0x1F, 0x16, 0x11, 0x03FFF801, PPC_MEM)
/*** External control ***/
/* Optional: */
-/* eciwx */
-#if defined(CONFIG_USER_ONLY)
-#define op_eciwx() gen_op_eciwx_raw()
-#define op_ecowx() gen_op_ecowx_raw()
-#else
#define op_eciwx() (*gen_op_eciwx[ctx->mem_idx])()
#define op_ecowx() (*gen_op_ecowx[ctx->mem_idx])()
+#if defined(CONFIG_USER_ONLY)
+static GenOpFunc *gen_op_eciwx[] = {
+ &gen_op_eciwx_raw,
+ &gen_op_eciwx_le_raw,
+};
+static GenOpFunc *gen_op_ecowx[] = {
+ &gen_op_ecowx_raw,
+ &gen_op_ecowx_le_raw,
+};
+#else
static GenOpFunc *gen_op_eciwx[] = {
&gen_op_eciwx_user,
+ &gen_op_eciwx_le_user,
&gen_op_eciwx_kernel,
+ &gen_op_eciwx_le_kernel,
};
static GenOpFunc *gen_op_ecowx[] = {
&gen_op_ecowx_user,
+ &gen_op_ecowx_le_user,
&gen_op_ecowx_kernel,
+ &gen_op_ecowx_le_kernel,
};
#endif
+/* eciwx */
GEN_HANDLER(eciwx, 0x1F, 0x16, 0x0D, 0x00000001, PPC_EXTERN)
{
/* Should check EAR[E] & alignment ! */
@@ -3143,10 +3209,10 @@ int gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
ctx.tb = tb;
ctx.exception = EXCP_NONE;
#if defined(CONFIG_USER_ONLY)
- ctx.mem_idx = 0;
+ ctx.mem_idx = msr_le;
#else
ctx.supervisor = 1 - msr_pr;
- ctx.mem_idx = 1 - msr_pr;
+ ctx.mem_idx = ((1 - msr_pr) << 1) | msr_le;
#endif
ctx.fpu_enabled = msr_fp;
#if defined (DO_SINGLE_STEP)
@@ -3173,11 +3239,17 @@ int gen_intermediate_code_internal (CPUState *env, TranslationBlock *tb,
}
#endif
ctx.opcode = ldl_code(ctx.nip);
+ if (msr_le) {
+ ctx.opcode = ((ctx.opcode & 0xFF000000) >> 24) |
+ ((ctx.opcode & 0x00FF0000) >> 8) |
+ ((ctx.opcode & 0x0000FF00) << 8) |
+ ((ctx.opcode & 0x000000FF) << 24);
+ }
#if defined PPC_DEBUG_DISAS
if (loglevel & CPU_LOG_TB_IN_ASM) {
- fprintf(logfile, "translate opcode %08x (%02x %02x %02x)\n",
+ fprintf(logfile, "translate opcode %08x (%02x %02x %02x) (%s)\n",
ctx.opcode, opc1(ctx.opcode), opc2(ctx.opcode),
- opc3(ctx.opcode));
+ opc3(ctx.opcode), msr_le ? "little" : "big");
}
#endif
ctx.nip += 4;
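
For reference, a small sketch (not from the patch; the function and enum names are hypothetical) of how the mem_idx value computed at the end of translate.c indexes the four-entry op tables introduced above:

    /* mem_idx packs (supervisor, little-endian) into two bits, matching the
     * table order {user, user_le, kernel, kernel_le}. */
    enum {
        MEM_IDX_USER      = 0,  /* (0 << 1) | 0 */
        MEM_IDX_USER_LE   = 1,  /* (0 << 1) | 1 */
        MEM_IDX_KERNEL    = 2,  /* (1 << 1) | 0 */
        MEM_IDX_KERNEL_LE = 3,  /* (1 << 1) | 1 */
    };

    static int compute_mem_idx(int msr_pr, int msr_le)
    {
        int supervisor = 1 - msr_pr;        /* MSR[PR] = 0 means supervisor mode */
        return (supervisor << 1) | msr_le;  /* same formula as in translate.c */
    }

With CONFIG_USER_ONLY the supervisor bit is dropped and mem_idx is simply msr_le, so the user-only tables carry only the _raw and _le_raw entries.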