src/lj_asm.c - luajit-2.0-src

Source code

  1. /*
  2. ** IR assembler (SSA IR -> machine code).
  3. ** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
  4. */

  5. #define lj_asm_c
  6. #define LUA_CORE

  7. #include "lj_obj.h"

  8. #if LJ_HASJIT

  9. #include "lj_gc.h"
  10. #include "lj_str.h"
  11. #include "lj_tab.h"
  12. #include "lj_frame.h"
  13. #if LJ_HASFFI
  14. #include "lj_ctype.h"
  15. #endif
  16. #include "lj_ir.h"
  17. #include "lj_jit.h"
  18. #include "lj_ircall.h"
  19. #include "lj_iropt.h"
  20. #include "lj_mcode.h"
  21. #include "lj_iropt.h"
  22. #include "lj_trace.h"
  23. #include "lj_snap.h"
  24. #include "lj_asm.h"
  25. #include "lj_dispatch.h"
  26. #include "lj_vm.h"
  27. #include "lj_target.h"

  28. #ifdef LUA_USE_ASSERT
  29. #include <stdio.h>
  30. #endif

  31. /* -- Assembler state and common macros ----------------------------------- */

  32. /* Assembler state. */
  33. typedef struct ASMState {
  34.   RegCost cost[RID_MAX];  /* Reference and blended allocation cost for regs. */

  35.   MCode *mcp;                /* Current MCode pointer (grows down). */
  36.   MCode *mclim;                /* Lower limit for MCode memory + red zone. */
  37. #ifdef LUA_USE_ASSERT
  38.   MCode *mcp_prev;        /* Red zone overflow check. */
  39. #endif

  40.   IRIns *ir;                /* Copy of pointer to IR instructions/constants. */
  41.   jit_State *J;                /* JIT compiler state. */

  42. #if LJ_TARGET_X86ORX64
  43.   x86ModRM mrm;                /* Fused x86 address operand. */
  44. #endif

  45.   RegSet freeset;        /* Set of free registers. */
  46.   RegSet modset;        /* Set of registers modified inside the loop. */
  47.   RegSet weakset;        /* Set of weakly referenced registers. */
  48.   RegSet phiset;        /* Set of PHI registers. */

  49.   uint32_t flags;        /* Copy of JIT compiler flags. */
  50.   int loopinv;                /* Loop branch inversion (0:no, 1:yes, 2:yes+CC_P). */

  51.   int32_t evenspill;        /* Next even spill slot. */
  52.   int32_t oddspill;        /* Next odd spill slot (or 0). */

  53.   IRRef curins;                /* Reference of current instruction. */
  54.   IRRef stopins;        /* Stop assembly before hitting this instruction. */
  55.   IRRef orignins;        /* Original T->nins. */

  56.   IRRef snapref;        /* Current snapshot is active after this reference. */
  57.   IRRef snaprename;        /* Rename highwater mark for snapshot check. */
  58.   SnapNo snapno;        /* Current snapshot number. */
  59.   SnapNo loopsnapno;        /* Loop snapshot number. */

  60.   IRRef fuseref;        /* Fusion limit (loopref, 0 or FUSE_DISABLED). */
  61.   IRRef sectref;        /* Section base reference (loopref or 0). */
  62.   IRRef loopref;        /* Reference of LOOP instruction (or 0). */

  63.   BCReg topslot;        /* Number of slots for stack check (unless 0). */
  64.   int32_t gcsteps;        /* Accumulated number of GC steps (per section). */

  65.   GCtrace *T;                /* Trace to assemble. */
  66.   GCtrace *parent;        /* Parent trace (or NULL). */

  67.   MCode *mcbot;                /* Bottom of reserved MCode. */
  68.   MCode *mctop;                /* Top of generated MCode. */
  69.   MCode *mcloop;        /* Pointer to loop MCode (or NULL). */
  70.   MCode *invmcp;        /* Points to invertible loop branch (or NULL). */
  71.   MCode *flagmcp;        /* Pending opportunity to merge flag setting ins. */
  72.   MCode *realign;        /* Realign loop if not NULL. */

  73. #ifdef RID_NUM_KREF
  74.   int32_t krefk[RID_NUM_KREF];
  75. #endif
  76.   IRRef1 phireg[RID_MAX];  /* PHI register references. */
  77.   uint16_t parentmap[LJ_MAX_JSLOTS];  /* Parent instruction to RegSP map. */
  78. } ASMState;

  79. #define IR(ref)                        (&as->ir[(ref)])

  80. #define ASMREF_TMP1                REF_TRUE        /* Temp. register. */
  81. #define ASMREF_TMP2                REF_FALSE        /* Temp. register. */
  82. #define ASMREF_L                REF_NIL                /* Stores register for L. */

  83. /* Check for variant to invariant references. */
  84. #define iscrossref(as, ref)        ((ref) < as->sectref)

  85. /* Inhibit memory op fusion from variant to invariant references. */
  86. #define FUSE_DISABLED                (~(IRRef)0)
  87. #define mayfuse(as, ref)        ((ref) > as->fuseref)
  88. #define neverfuse(as)                (as->fuseref == FUSE_DISABLED)
  89. #define canfuse(as, ir)                (!neverfuse(as) && !irt_isphi((ir)->t))
  90. #define opisfusableload(o) \
  91.   ((o) == IR_ALOAD || (o) == IR_HLOAD || (o) == IR_ULOAD || \
  92.    (o) == IR_FLOAD || (o) == IR_XLOAD || (o) == IR_SLOAD || (o) == IR_VLOAD)
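
For illustration, a minimal sketch of how these guards are typically combined before fusing a load into its using instruction (hypothetical helper, not part of lj_asm.c):

static int asm_canfuseload(ASMState *as, IRRef ref)
{
  IRIns *ir = IR(ref);
  /* Opcode must be a fusable load, the reference must lie above the fusion
  ** limit (no fusion from variant to invariant references), and fusion must
  ** not be disabled globally or blocked because the result is a PHI.
  */
  return opisfusableload(ir->o) && mayfuse(as, ref) && canfuse(as, ir);
}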

  93. /* Sparse limit checks using a red zone before the actual limit. */
  94. #define MCLIM_REDZONE        64

  95. static LJ_NORET LJ_NOINLINE void asm_mclimit(ASMState *as)
  96. {
  97.   lj_mcode_limiterr(as->J, (size_t)(as->mctop - as->mcp + 4*MCLIM_REDZONE));
  98. }

  99. static LJ_AINLINE void checkmclim(ASMState *as)
  100. {
  101. #ifdef LUA_USE_ASSERT
  102.   if (as->mcp + MCLIM_REDZONE < as->mcp_prev) {
  103.     IRIns *ir = IR(as->curins+1);
  104.     fprintf(stderr, "RED ZONE OVERFLOW: %p IR %04d  %02d %04d %04d\n", as->mcp,
  105.             as->curins+1-REF_BIAS, ir->o, ir->op1-REF_BIAS, ir->op2-REF_BIAS);
  106.     lua_assert(0);
  107.   }
  108. #endif
  109.   if (LJ_UNLIKELY(as->mcp < as->mclim)) asm_mclimit(as);
  110. #ifdef LUA_USE_ASSERT
  111.   as->mcp_prev = as->mcp;
  112. #endif
  113. }
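
As a rough sketch of how the red zone is used (hypothetical driver loop, not the actual assembly loop in this file): each emitter may move as->mcp down by at most MCLIM_REDZONE bytes before the next check, so one cheap comparison against as->mclim per IR instruction suffices.

  while (as->curins > as->stopins) {
    IRIns *ir = IR(--as->curins);
    asm_ir(as, ir);   /* May emit up to MCLIM_REDZONE bytes below as->mcp. */
    checkmclim(as);   /* Sparse limit check; calls asm_mclimit() on overflow. */
  }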

  114. #ifdef RID_NUM_KREF
  115. #define ra_iskref(ref)                ((ref) < RID_NUM_KREF)
  116. #define ra_krefreg(ref)                ((Reg)(RID_MIN_KREF + (Reg)(ref)))
  117. #define ra_krefk(as, ref)        (as->krefk[(ref)])

  118. static LJ_AINLINE void ra_setkref(ASMState *as, Reg r, int32_t k)
  119. {
  120.   IRRef ref = (IRRef)(r - RID_MIN_KREF);
  121.   as->krefk[ref] = k;
  122.   as->cost[r] = REGCOST(ref, ref);
  123. }

  124. #else
  125. #define ra_iskref(ref)                0
  126. #define ra_krefreg(ref)                RID_MIN_GPR
  127. #define ra_krefk(as, ref)        0
  128. #endif

  129. /* Arch-specific field offsets. */
  130. static const uint8_t field_ofs[IRFL__MAX+1] = {
  131. #define FLOFS(name, ofs)        (uint8_t)(ofs),
  132. IRFLDEF(FLOFS)
  133. #undef FLOFS
  134.   0
  135. };

  136. /* -- Target-specific instruction emitter --------------------------------- */

  137. #if LJ_TARGET_X86ORX64
  138. #include "lj_emit_x86.h"
  139. #elif LJ_TARGET_ARM
  140. #include "lj_emit_arm.h"
  141. #elif LJ_TARGET_PPC
  142. #include "lj_emit_ppc.h"
  143. #elif LJ_TARGET_MIPS
  144. #include "lj_emit_mips.h"
  145. #else
  146. #error "Missing instruction emitter for target CPU"
  147. #endif

  148. /* Generic load/store of register from/to stack slot. */
  149. #define emit_spload(as, ir, r, ofs) \
  150.   emit_loadofs(as, ir, (r), RID_SP, (ofs))
  151. #define emit_spstore(as, ir, r, ofs) \
  152.   emit_storeofs(as, ir, (r), RID_SP, (ofs))

  153. /* -- Register allocator debugging ---------------------------------------- */

  154. /* #define LUAJIT_DEBUG_RA */

  155. #ifdef LUAJIT_DEBUG_RA

  156. #include <stdio.h>
  157. #include <stdarg.h>

  158. #define RIDNAME(name)        #name,
  159. static const char *const ra_regname[] = {
  160.   GPRDEF(RIDNAME)
  161.   FPRDEF(RIDNAME)
  162.   VRIDDEF(RIDNAME)
  163.   NULL
  164. };
  165. #undef RIDNAME

  166. static char ra_dbg_buf[65536];
  167. static char *ra_dbg_p;
  168. static char *ra_dbg_merge;
  169. static MCode *ra_dbg_mcp;

  170. static void ra_dstart(void)
  171. {
  172.   ra_dbg_p = ra_dbg_buf;
  173.   ra_dbg_merge = NULL;
  174.   ra_dbg_mcp = NULL;
  175. }

  176. static void ra_dflush(void)
  177. {
  178.   fwrite(ra_dbg_buf, 1, (size_t)(ra_dbg_p-ra_dbg_buf), stdout);
  179.   ra_dstart();
  180. }

  181. static void ra_dprintf(ASMState *as, const char *fmt, ...)
  182. {
  183.   char *p;
  184.   va_list argp;
  185.   va_start(argp, fmt);
  186.   p = ra_dbg_mcp == as->mcp ? ra_dbg_merge : ra_dbg_p;
  187.   ra_dbg_mcp = NULL;
  188.   p += sprintf(p, "%08x  \e[36m%04d ", (uintptr_t)as->mcp, as->curins-REF_BIAS);
  189.   for (;;) {
  190.     const char *e = strchr(fmt, '$');
  191.     if (e == NULL) break;
  192.     memcpy(p, fmt, (size_t)(e-fmt));
  193.     p += e-fmt;
  194.     if (e[1] == 'r') {
  195.       Reg r = va_arg(argp, Reg) & RID_MASK;
  196.       if (r <= RID_MAX) {
  197.         const char *q;
  198.         for (q = ra_regname[r]; *q; q++)
  199.           *p++ = *q >= 'A' && *q <= 'Z' ? *q + 0x20 : *q;
  200.       } else {
  201.         *p++ = '?';
  202.         lua_assert(0);
  203.       }
  204.     } else if (e[1] == 'f' || e[1] == 'i') {
  205.       IRRef ref;
  206.       if (e[1] == 'f')
  207.         ref = va_arg(argp, IRRef);
  208.       else
  209.         ref = va_arg(argp, IRIns *) - as->ir;
  210.       if (ref >= REF_BIAS)
  211.         p += sprintf(p, "%04d", ref - REF_BIAS);
  212.       else
  213.         p += sprintf(p, "K%03d", REF_BIAS - ref);
  214.     } else if (e[1] == 's') {
  215.       uint32_t slot = va_arg(argp, uint32_t);
  216.       p += sprintf(p, "[sp+0x%x]", sps_scale(slot));
  217.     } else if (e[1] == 'x') {
  218.       p += sprintf(p, "%08x", va_arg(argp, int32_t));
  219.     } else {
  220.       lua_assert(0);
  221.     }
  222.     fmt = e+2;
  223.   }
  224.   va_end(argp);
  225.   while (*fmt)
  226.     *p++ = *fmt++;
  227.   *p++ = '\e'; *p++ = '['; *p++ = 'm'; *p++ = '\n';
  228.   if (p > ra_dbg_buf+sizeof(ra_dbg_buf)-256) {
  229.     fwrite(ra_dbg_buf, 1, (size_t)(p-ra_dbg_buf), stdout);
  230.     p = ra_dbg_buf;
  231.   }
  232.   ra_dbg_p = p;
  233. }

  234. #define RA_DBG_START()        ra_dstart()
  235. #define RA_DBG_FLUSH()        ra_dflush()
  236. #define RA_DBG_REF() \
  237.   do { char *_p = ra_dbg_p; ra_dprintf(as, ""); \
  238.        ra_dbg_merge = _p; ra_dbg_mcp = as->mcp; } while (0)
  239. #define RA_DBGX(x)        ra_dprintf x

  240. #else
  241. #define RA_DBG_START()        ((void)0)
  242. #define RA_DBG_FLUSH()        ((void)0)
  243. #define RA_DBG_REF()        ((void)0)
  244. #define RA_DBGX(x)        ((void)0)
  245. #endif

  246. /* -- Register allocator -------------------------------------------------- */

  247. #define ra_free(as, r)                rset_set(as->freeset, (r))
  248. #define ra_modified(as, r)        rset_set(as->modset, (r))
  249. #define ra_weak(as, r)                rset_set(as->weakset, (r))
  250. #define ra_noweak(as, r)        rset_clear(as->weakset, (r))

  251. #define ra_used(ir)                (ra_hasreg((ir)->r) || ra_hasspill((ir)->s))

  252. /* Setup register allocator. */
  253. static void ra_setup(ASMState *as)
  254. {
  255.   Reg r;
  256.   /* Initially all regs (except the stack pointer) are free for use. */
  257.   as->freeset = RSET_INIT;
  258.   as->modset = RSET_EMPTY;
  259.   as->weakset = RSET_EMPTY;
  260.   as->phiset = RSET_EMPTY;
  261.   memset(as->phireg, 0, sizeof(as->phireg));
  262.   for (r = RID_MIN_GPR; r < RID_MAX; r++)
  263.     as->cost[r] = REGCOST(~0u, 0u);
  264. }

  265. /* Rematerialize constants. */
  266. static Reg ra_rematk(ASMState *as, IRRef ref)
  267. {
  268.   IRIns *ir;
  269.   Reg r;
  270.   if (ra_iskref(ref)) {
  271.     r = ra_krefreg(ref);
  272.     lua_assert(!rset_test(as->freeset, r));
  273.     ra_free(as, r);
  274.     ra_modified(as, r);
  275.     emit_loadi(as, r, ra_krefk(as, ref));
  276.     return r;
  277.   }
  278.   ir = IR(ref);
  279.   r = ir->r;
  280.   lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  281.   ra_free(as, r);
  282.   ra_modified(as, r);
  283.   ir->r = RID_INIT;  /* Do not keep any hint. */
  284.   RA_DBGX((as, "remat     $i $r", ir, r));
  285. #if !LJ_SOFTFP
  286.   if (ir->o == IR_KNUM) {
  287.     emit_loadn(as, r, ir_knum(ir));
  288.   } else
  289. #endif
  290.   if (emit_canremat(REF_BASE) && ir->o == IR_BASE) {
  291.     ra_sethint(ir->r, RID_BASE);  /* Restore BASE register hint. */
  292.     emit_getgl(as, r, jit_base);
  293.   } else if (emit_canremat(ASMREF_L) && ir->o == IR_KPRI) {
  294.     lua_assert(irt_isnil(ir->t));  /* REF_NIL stores ASMREF_L register. */
  295.     emit_getgl(as, r, cur_L);
  296. #if LJ_64
  297.   } else if (ir->o == IR_KINT64) {
  298.     emit_loadu64(as, r, ir_kint64(ir)->u64);
  299. #endif
  300.   } else {
  301.     lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
  302.                ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
  303.     emit_loadi(as, r, ir->i);
  304.   }
  305.   return r;
  306. }

  307. /* Force a spill. Allocate a new spill slot if needed. */
  308. static int32_t ra_spill(ASMState *as, IRIns *ir)
  309. {
  310.   int32_t slot = ir->s;
  311.   lua_assert(ir >= as->ir + REF_TRUE);
  312.   if (!ra_hasspill(slot)) {
  313.     if (irt_is64(ir->t)) {
  314.       slot = as->evenspill;
  315.       as->evenspill += 2;
  316.     } else if (as->oddspill) {
  317.       slot = as->oddspill;
  318.       as->oddspill = 0;
  319.     } else {
  320.       slot = as->evenspill;
  321.       as->oddspill = slot+1;
  322.       as->evenspill += 2;
  323.     }
  324.     if (as->evenspill > 256)
  325.       lj_trace_err(as->J, LJ_TRERR_SPILLOV);
  326.     ir->s = (uint8_t)slot;
  327.   }
  328.   return sps_scale(slot);
  329. }
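
A worked example of the even/odd bookkeeping above (hypothetical refs, starting from evenspill == E with oddspill == 0):

/*
**   spill 32-bit x : slot = E     (oddspill = E+1, evenspill = E+2)
**   spill 64-bit y : slot = E+2   (even-aligned pair E+2/E+3, evenspill = E+4)
**   spill 32-bit z : slot = E+1   (reuses the odd leftover, oddspill = 0)
*/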

  330. /* Release the temporarily allocated register in ASMREF_TMP1/ASMREF_TMP2. */
  331. static Reg ra_releasetmp(ASMState *as, IRRef ref)
  332. {
  333.   IRIns *ir = IR(ref);
  334.   Reg r = ir->r;
  335.   lua_assert(ra_hasreg(r) && !ra_hasspill(ir->s));
  336.   ra_free(as, r);
  337.   ra_modified(as, r);
  338.   ir->r = RID_INIT;
  339.   return r;
  340. }

  341. /* Restore a register (marked as free). Rematerialize or force a spill. */
  342. static Reg ra_restore(ASMState *as, IRRef ref)
  343. {
  344.   if (emit_canremat(ref)) {
  345.     return ra_rematk(as, ref);
  346.   } else {
  347.     IRIns *ir = IR(ref);
  348.     int32_t ofs = ra_spill(as, ir);  /* Force a spill slot. */
  349.     Reg r = ir->r;
  350.     lua_assert(ra_hasreg(r));
  351.     ra_sethint(ir->r, r);  /* Keep hint. */
  352.     ra_free(as, r);
  353.     if (!rset_test(as->weakset, r)) {  /* Only restore non-weak references. */
  354.       ra_modified(as, r);
  355.       RA_DBGX((as, "restore   $i $r", ir, r));
  356.       emit_spload(as, ir, r, ofs);
  357.     }
  358.     return r;
  359.   }
  360. }

  361. /* Save a register to a spill slot. */
  362. static void ra_save(ASMState *as, IRIns *ir, Reg r)
  363. {
  364.   RA_DBGX((as, "save      $i $r", ir, r));
  365.   emit_spstore(as, ir, r, sps_scale(ir->s));
  366. }

  367. #define MINCOST(name) \
  368.   if (rset_test(RSET_ALL, RID_##name) && \
  369.       LJ_LIKELY(allow&RID2RSET(RID_##name)) && as->cost[RID_##name] < cost) \
  370.     cost = as->cost[RID_##name];

  371. /* Evict the register with the lowest cost, forcing a restore. */
  372. static Reg ra_evict(ASMState *as, RegSet allow)
  373. {
  374.   IRRef ref;
  375.   RegCost cost = ~(RegCost)0;
  376.   lua_assert(allow != RSET_EMPTY);
  377.   if (RID_NUM_FPR == 0 || allow < RID2RSET(RID_MAX_GPR)) {
  378.     GPRDEF(MINCOST)
  379.   } else {
  380.     FPRDEF(MINCOST)
  381.   }
  382.   ref = regcost_ref(cost);
  383.   lua_assert(ra_iskref(ref) || (ref >= as->T->nk && ref < as->T->nins));
  384.   /* Preferably pick any weak ref instead of a non-weak, non-const ref. */
  385.   if (!irref_isk(ref) && (as->weakset & allow)) {
  386.     IRIns *ir = IR(ref);
  387.     if (!rset_test(as->weakset, ir->r))
  388.       ref = regcost_ref(as->cost[rset_pickbot((as->weakset & allow))]);
  389.   }
  390.   return ra_restore(as, ref);
  391. }

  392. /* Pick any register (marked as free). Evict on-demand. */
  393. static Reg ra_pick(ASMState *as, RegSet allow)
  394. {
  395.   RegSet pick = as->freeset & allow;
  396.   if (!pick)
  397.     return ra_evict(as, allow);
  398.   else
  399.     return rset_picktop(pick);
  400. }

  401. /* Get a scratch register (marked as free). */
  402. static Reg ra_scratch(ASMState *as, RegSet allow)
  403. {
  404.   Reg r = ra_pick(as, allow);
  405.   ra_modified(as, r);
  406.   RA_DBGX((as, "scratch        $r", r));
  407.   return r;
  408. }

  409. /* Evict all registers from a set (if not free). */
  410. static void ra_evictset(ASMState *as, RegSet drop)
  411. {
  412.   RegSet work;
  413.   as->modset |= drop;
  414. #if !LJ_SOFTFP
  415.   work = (drop & ~as->freeset) & RSET_FPR;
  416.   while (work) {
  417.     Reg r = rset_pickbot(work);
  418.     ra_restore(as, regcost_ref(as->cost[r]));
  419.     rset_clear(work, r);
  420.     checkmclim(as);
  421.   }
  422. #endif
  423.   work = (drop & ~as->freeset);
  424.   while (work) {
  425.     Reg r = rset_pickbot(work);
  426.     ra_restore(as, regcost_ref(as->cost[r]));
  427.     rset_clear(work, r);
  428.     checkmclim(as);
  429.   }
  430. }

  431. /* Evict (rematerialize) all registers allocated to constants. */
  432. static void ra_evictk(ASMState *as)
  433. {
  434.   RegSet work;
  435. #if !LJ_SOFTFP
  436.   work = ~as->freeset & RSET_FPR;
  437.   while (work) {
  438.     Reg r = rset_pickbot(work);
  439.     IRRef ref = regcost_ref(as->cost[r]);
  440.     if (emit_canremat(ref) && irref_isk(ref)) {
  441.       ra_rematk(as, ref);
  442.       checkmclim(as);
  443.     }
  444.     rset_clear(work, r);
  445.   }
  446. #endif
  447.   work = ~as->freeset & RSET_GPR;
  448.   while (work) {
  449.     Reg r = rset_pickbot(work);
  450.     IRRef ref = regcost_ref(as->cost[r]);
  451.     if (emit_canremat(ref) && irref_isk(ref)) {
  452.       ra_rematk(as, ref);
  453.       checkmclim(as);
  454.     }
  455.     rset_clear(work, r);
  456.   }
  457. }

  458. #ifdef RID_NUM_KREF
  459. /* Allocate a register for a constant. */
  460. static Reg ra_allock(ASMState *as, int32_t k, RegSet allow)
  461. {
  462.   /* First try to find a register which already holds the same constant. */
  463.   RegSet pick, work = ~as->freeset & RSET_GPR;
  464.   Reg r;
  465.   while (work) {
  466.     IRRef ref;
  467.     r = rset_pickbot(work);
  468.     ref = regcost_ref(as->cost[r]);
  469.     if (ref < ASMREF_L &&
  470.         k == (ra_iskref(ref) ? ra_krefk(as, ref) : IR(ref)->i))
  471.       return r;
  472.     rset_clear(work, r);
  473.   }
  474.   pick = as->freeset & allow;
  475.   if (pick) {
  476.     /* Constants should preferably get unmodified registers. */
  477.     if ((pick & ~as->modset))
  478.       pick &= ~as->modset;
  479.     r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  480.   } else {
  481.     r = ra_evict(as, allow);
  482.   }
  483.   RA_DBGX((as, "allock    $x $r", k, r));
  484.   ra_setkref(as, r, k);
  485.   rset_clear(as->freeset, r);
  486.   ra_noweak(as, r);
  487.   return r;
  488. }

  489. /* Allocate a specific register for a constant. */
  490. static void ra_allockreg(ASMState *as, int32_t k, Reg r)
  491. {
  492.   Reg kr = ra_allock(as, k, RID2RSET(r));
  493.   if (kr != r) {
  494.     IRIns irdummy;
  495.     irdummy.t.irt = IRT_INT;
  496.     ra_scratch(as, RID2RSET(r));
  497.     emit_movrr(as, &irdummy, r, kr);
  498.   }
  499. }
  500. #else
  501. #define ra_allockreg(as, k, r)                emit_loadi(as, (r), (k))
  502. #endif

  503. /* Allocate a register for ref from the allowed set of registers.
  504. ** Note: this function assumes the ref does NOT have a register yet!
  505. ** Picks an optimal register, sets the cost and marks the register as non-free.
  506. */
  507. static Reg ra_allocref(ASMState *as, IRRef ref, RegSet allow)
  508. {
  509.   IRIns *ir = IR(ref);
  510.   RegSet pick = as->freeset & allow;
  511.   Reg r;
  512.   lua_assert(ra_noreg(ir->r));
  513.   if (pick) {
  514.     /* First check register hint from propagation or PHI. */
  515.     if (ra_hashint(ir->r)) {
  516.       r = ra_gethint(ir->r);
  517.       if (rset_test(pick, r))  /* Use hint register if possible. */
  518.         goto found;
  519.       /* Rematerialization is cheaper than missing a hint. */
  520.       if (rset_test(allow, r) && emit_canremat(regcost_ref(as->cost[r]))) {
  521.         ra_rematk(as, regcost_ref(as->cost[r]));
  522.         goto found;
  523.       }
  524.       RA_DBGX((as, "hintmiss  $f $r", ref, r));
  525.     }
  526.     /* Invariants should preferably get unmodified registers. */
  527.     if (ref < as->loopref && !irt_isphi(ir->t)) {
  528.       if ((pick & ~as->modset))
  529.         pick &= ~as->modset;
  530.       r = rset_pickbot(pick);  /* Reduce conflicts with inverse allocation. */
  531.     } else {
  532.       /* We've got plenty of regs, so get callee-save regs if possible. */
  533.       if (RID_NUM_GPR > 8 && (pick & ~RSET_SCRATCH))
  534.         pick &= ~RSET_SCRATCH;
  535.       r = rset_picktop(pick);
  536.     }
  537.   } else {
  538.     r = ra_evict(as, allow);
  539.   }
  540. found:
  541.   RA_DBGX((as, "alloc     $f $r", ref, r));
  542.   ir->r = (uint8_t)r;
  543.   rset_clear(as->freeset, r);
  544.   ra_noweak(as, r);
  545.   as->cost[r] = REGCOST_REF_T(ref, irt_t(ir->t));
  546.   return r;
  547. }

  548. /* Allocate a register on-demand. */
  549. static Reg ra_alloc1(ASMState *as, IRRef ref, RegSet allow)
  550. {
  551.   Reg r = IR(ref)->r;
  552.   /* Note: allow is ignored if the register is already allocated. */
  553.   if (ra_noreg(r)) r = ra_allocref(as, ref, allow);
  554.   ra_noweak(as, r);
  555.   return r;
  556. }

  557. /* Rename register allocation and emit move. */
  558. static void ra_rename(ASMState *as, Reg down, Reg up)
  559. {
  560.   IRRef ren, ref = regcost_ref(as->cost[up] = as->cost[down]);
  561.   IRIns *ir = IR(ref);
  562.   ir->r = (uint8_t)up;
  563.   as->cost[down] = 0;
  564.   lua_assert((down < RID_MAX_GPR) == (up < RID_MAX_GPR));
  565.   lua_assert(!rset_test(as->freeset, down) && rset_test(as->freeset, up));
  566.   ra_free(as, down);  /* 'down' is free ... */
  567.   ra_modified(as, down);
  568.   rset_clear(as->freeset, up);  /* ... and 'up' is now allocated. */
  569.   ra_noweak(as, up);
  570.   RA_DBGX((as, "rename    $f $r $r", regcost_ref(as->cost[up]), down, up));
  571.   emit_movrr(as, ir, down, up);  /* Backwards codegen needs inverse move. */
  572.   if (!ra_hasspill(IR(ref)->s)) {  /* Add the rename to the IR. */
  573.     lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), ref, as->snapno);
  574.     ren = tref_ref(lj_ir_emit(as->J));
  575.     as->ir = as->T->ir;  /* The IR may have been reallocated. */
  576.     IR(ren)->r = (uint8_t)down;
  577.     IR(ren)->s = SPS_NONE;
  578.   }
  579. }
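
To see why the move direction is inverted, read the surrounding machine code in execution order (hypothetical registers; everything below the move was emitted earlier in codegen, i.e. it executes later):

/*
**   ...code still to be generated...    computes the value of 'ref' into 'up'
**   mov   down, up                      <- move emitted by ra_rename()
**   ...code already generated...        reads the value of 'ref' from 'down'
*/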

  580. /* Pick a destination register (marked as free).
  581. ** Caveat: allow is ignored if there's already a destination register.
  582. ** Use ra_destreg() to get a specific register.
  583. */
  584. static Reg ra_dest(ASMState *as, IRIns *ir, RegSet allow)
  585. {
  586.   Reg dest = ir->r;
  587.   if (ra_hasreg(dest)) {
  588.     ra_free(as, dest);
  589.     ra_modified(as, dest);
  590.   } else {
  591.     if (ra_hashint(dest) && rset_test((as->freeset&allow), ra_gethint(dest))) {
  592.       dest = ra_gethint(dest);
  593.       ra_modified(as, dest);
  594.       RA_DBGX((as, "dest           $r", dest));
  595.     } else {
  596.       dest = ra_scratch(as, allow);
  597.     }
  598.     ir->r = dest;
  599.   }
  600.   if (LJ_UNLIKELY(ra_hasspill(ir->s))) ra_save(as, ir, dest);
  601.   return dest;
  602. }

  603. /* Force a specific destination register (marked as free). */
  604. static void ra_destreg(ASMState *as, IRIns *ir, Reg r)
  605. {
  606.   Reg dest = ra_dest(as, ir, RID2RSET(r));
  607.   if (dest != r) {
  608.     lua_assert(rset_test(as->freeset, r));
  609.     ra_modified(as, r);
  610.     emit_movrr(as, ir, dest, r);
  611.   }
  612. }

  613. #if LJ_TARGET_X86ORX64
  614. /* Propagate dest register to left reference. Emit moves as needed.
  615. ** This is a required fixup step for all 2-operand machine instructions.
  616. */
  617. static void ra_left(ASMState *as, Reg dest, IRRef lref)
  618. {
  619.   IRIns *ir = IR(lref);
  620.   Reg left = ir->r;
  621.   if (ra_noreg(left)) {
  622.     if (irref_isk(lref)) {
  623.       if (ir->o == IR_KNUM) {
  624.         cTValue *tv = ir_knum(ir);
  625.         /* FP remat needs a load except for +0. Still better than eviction. */
  626.         if (tvispzero(tv) || !(as->freeset & RSET_FPR)) {
  627.           emit_loadn(as, dest, tv);
  628.           return;
  629.         }
  630. #if LJ_64
  631.       } else if (ir->o == IR_KINT64) {
  632.         emit_loadu64(as, dest, ir_kint64(ir)->u64);
  633.         return;
  634. #endif
  635.       } else if (ir->o != IR_KPRI) {
  636.         lua_assert(ir->o == IR_KINT || ir->o == IR_KGC ||
  637.                    ir->o == IR_KPTR || ir->o == IR_KKPTR || ir->o == IR_KNULL);
  638.         emit_loadi(as, dest, ir->i);
  639.         return;
  640.       }
  641.     }
  642.     if (!ra_hashint(left) && !iscrossref(as, lref))
  643.       ra_sethint(ir->r, dest);  /* Propagate register hint. */
  644.     left = ra_allocref(as, lref, dest < RID_MAX_GPR ? RSET_GPR : RSET_FPR);
  645.   }
  646.   ra_noweak(as, left);
  647.   /* Move needed for true 3-operand instruction: y=a+b ==> y=a; y+=b. */
  648.   if (dest != left) {
  649.     /* Use register renaming if dest is the PHI reg. */
  650.     if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
  651.       ra_modified(as, left);
  652.       ra_rename(as, left, dest);
  653.     } else {
  654.       emit_movrr(as, ir, dest, left);
  655.     }
  656.   }
  657. }
  658. #else
  659. /* Similar to ra_left, except we override any hints. */
  660. static void ra_leftov(ASMState *as, Reg dest, IRRef lref)
  661. {
  662.   IRIns *ir = IR(lref);
  663.   Reg left = ir->r;
  664.   if (ra_noreg(left)) {
  665.     ra_sethint(ir->r, dest);  /* Propagate register hint. */
  666.     left = ra_allocref(as, lref,
  667.                        (LJ_SOFTFP || dest < RID_MAX_GPR) ? RSET_GPR : RSET_FPR);
  668.   }
  669.   ra_noweak(as, left);
  670.   if (dest != left) {
  671.     /* Use register renaming if dest is the PHI reg. */
  672.     if (irt_isphi(ir->t) && as->phireg[dest] == lref) {
  673.       ra_modified(as, left);
  674.       ra_rename(as, left, dest);
  675.     } else {
  676.       emit_movrr(as, ir, dest, left);
  677.     }
  678.   }
  679. }
  680. #endif

  681. #if !LJ_64
  682. /* Force a RID_RETLO/RID_RETHI destination register pair (marked as free). */
  683. static void ra_destpair(ASMState *as, IRIns *ir)
  684. {
  685.   Reg destlo = ir->r, desthi = (ir+1)->r;
  686.   /* First spill unrelated refs blocking the destination registers. */
  687.   if (!rset_test(as->freeset, RID_RETLO) &&
  688.       destlo != RID_RETLO && desthi != RID_RETLO)
  689.     ra_restore(as, regcost_ref(as->cost[RID_RETLO]));
  690.   if (!rset_test(as->freeset, RID_RETHI) &&
  691.       destlo != RID_RETHI && desthi != RID_RETHI)
  692.     ra_restore(as, regcost_ref(as->cost[RID_RETHI]));
  693.   /* Next free the destination registers (if any). */
  694.   if (ra_hasreg(destlo)) {
  695.     ra_free(as, destlo);
  696.     ra_modified(as, destlo);
  697.   } else {
  698.     destlo = RID_RETLO;
  699.   }
  700.   if (ra_hasreg(desthi)) {
  701.     ra_free(as, desthi);
  702.     ra_modified(as, desthi);
  703.   } else {
  704.     desthi = RID_RETHI;
  705.   }
  706.   /* Check for conflicts and shuffle the registers as needed. */
  707.   if (destlo == RID_RETHI) {
  708.     if (desthi == RID_RETLO) {
  709. #if LJ_TARGET_X86
  710.       *--as->mcp = XI_XCHGa + RID_RETHI;
  711. #else
  712.       emit_movrr(as, ir, RID_RETHI, RID_TMP);
  713.       emit_movrr(as, ir, RID_RETLO, RID_RETHI);
  714.       emit_movrr(as, ir, RID_TMP, RID_RETLO);
  715. #endif
  716.     } else {
  717.       emit_movrr(as, ir, RID_RETHI, RID_RETLO);
  718.       if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
  719.     }
  720.   } else if (desthi == RID_RETLO) {
  721.     emit_movrr(as, ir, RID_RETLO, RID_RETHI);
  722.     if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  723.   } else {
  724.     if (desthi != RID_RETHI) emit_movrr(as, ir, desthi, RID_RETHI);
  725.     if (destlo != RID_RETLO) emit_movrr(as, ir, destlo, RID_RETLO);
  726.   }
  727.   /* Restore spill slots (if any). */
  728.   if (ra_hasspill((ir+1)->s)) ra_save(as, ir+1, RID_RETHI);
  729.   if (ra_hasspill(ir->s)) ra_save(as, ir, RID_RETLO);
  730. }
  731. #endif

  732. /* -- Snapshot handling --------------------------------------------------- */

  733. /* Can we rematerialize a KNUM instead of forcing a spill? */
  734. static int asm_snap_canremat(ASMState *as)
  735. {
  736.   Reg r;
  737.   for (r = RID_MIN_FPR; r < RID_MAX_FPR; r++)
  738.     if (irref_isk(regcost_ref(as->cost[r])))
  739.       return 1;
  740.   return 0;
  741. }

  742. /* Check whether a sunk store corresponds to an allocation. */
  743. static int asm_sunk_store(ASMState *as, IRIns *ira, IRIns *irs)
  744. {
  745.   if (irs->s == 255) {
  746.     if (irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
  747.         irs->o == IR_FSTORE || irs->o == IR_XSTORE) {
  748.       IRIns *irk = IR(irs->op1);
  749.       if (irk->o == IR_AREF || irk->o == IR_HREFK)
  750.         irk = IR(irk->op1);
  751.       return (IR(irk->op1) == ira);
  752.     }
  753.     return 0;
  754.   } else {
  755.     return (ira + irs->s == irs);  /* Quick check. */
  756.   }
  757. }

  758. /* Allocate register or spill slot for a ref that escapes to a snapshot. */
  759. static void asm_snap_alloc1(ASMState *as, IRRef ref)
  760. {
  761.   IRIns *ir = IR(ref);
  762.   if (!irref_isk(ref) && (!(ra_used(ir) || ir->r == RID_SUNK))) {
  763.     if (ir->r == RID_SINK) {
  764.       ir->r = RID_SUNK;
  765. #if LJ_HASFFI
  766.       if (ir->o == IR_CNEWI) {  /* Allocate CNEWI value. */
  767.         asm_snap_alloc1(as, ir->op2);
  768.         if (LJ_32 && (ir+1)->o == IR_HIOP)
  769.           asm_snap_alloc1(as, (ir+1)->op2);
  770.       } else
  771. #endif
  772.       {  /* Allocate stored values for TNEW, TDUP and CNEW. */
  773.         IRIns *irs;
  774.         lua_assert(ir->o == IR_TNEW || ir->o == IR_TDUP || ir->o == IR_CNEW);
  775.         for (irs = IR(as->snapref-1); irs > ir; irs--)
  776.           if (irs->r == RID_SINK && asm_sunk_store(as, ir, irs)) {
  777.             lua_assert(irs->o == IR_ASTORE || irs->o == IR_HSTORE ||
  778.                        irs->o == IR_FSTORE || irs->o == IR_XSTORE);
  779.             asm_snap_alloc1(as, irs->op2);
  780.             if (LJ_32 && (irs+1)->o == IR_HIOP)
  781.               asm_snap_alloc1(as, (irs+1)->op2);
  782.           }
  783.       }
  784.     } else {
  785.       RegSet allow;
  786.       if (ir->o == IR_CONV && ir->op2 == IRCONV_NUM_INT) {
  787.         IRIns *irc;
  788.         for (irc = IR(as->curins); irc > ir; irc--)
  789.           if ((irc->op1 == ref || irc->op2 == ref) &&
  790.               !(irc->r == RID_SINK || irc->r == RID_SUNK))
  791.             goto nosink;  /* Don't sink conversion if result is used. */
  792.         asm_snap_alloc1(as, ir->op1);
  793.         return;
  794.       }
  795.     nosink:
  796.       allow = (!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR;
  797.       if ((as->freeset & allow) ||
  798.                (allow == RSET_FPR && asm_snap_canremat(as))) {
  799.         /* Get a weak register if we have a free one or can rematerialize. */
  800.         Reg r = ra_allocref(as, ref, allow);  /* Allocate a register. */
  801.         if (!irt_isphi(ir->t))
  802.           ra_weak(as, r);  /* But mark it as weakly referenced. */
  803.         checkmclim(as);
  804.         RA_DBGX((as, "snapreg   $f $r", ref, ir->r));
  805.       } else {
  806.         ra_spill(as, ir);  /* Otherwise force a spill slot. */
  807.         RA_DBGX((as, "snapspill $f $s", ref, ir->s));
  808.       }
  809.     }
  810.   }
  811. }

  812. /* Allocate refs escaping to a snapshot. */
  813. static void asm_snap_alloc(ASMState *as)
  814. {
  815.   SnapShot *snap = &as->T->snap[as->snapno];
  816.   SnapEntry *map = &as->T->snapmap[snap->mapofs];
  817.   MSize n, nent = snap->nent;
  818.   for (n = 0; n < nent; n++) {
  819.     SnapEntry sn = map[n];
  820.     IRRef ref = snap_ref(sn);
  821.     if (!irref_isk(ref)) {
  822.       asm_snap_alloc1(as, ref);
  823.       if (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM)) {
  824.         lua_assert(irt_type(IR(ref+1)->t) == IRT_SOFTFP);
  825.         asm_snap_alloc1(as, ref+1);
  826.       }
  827.     }
  828.   }
  829. }

  830. /* All guards for a snapshot use the same exitno. This is currently the
  831. ** same as the snapshot number. Since the exact origin of the exit cannot
  832. ** be determined, all guards for the same snapshot must exit with the same
  833. ** RegSP mapping.
  834. ** A renamed ref which has been used in a prior guard for the same snapshot
  835. ** would cause an inconsistency. The easy way out is to force a spill slot.
  836. */
  837. static int asm_snap_checkrename(ASMState *as, IRRef ren)
  838. {
  839.   SnapShot *snap = &as->T->snap[as->snapno];
  840.   SnapEntry *map = &as->T->snapmap[snap->mapofs];
  841.   MSize n, nent = snap->nent;
  842.   for (n = 0; n < nent; n++) {
  843.     SnapEntry sn = map[n];
  844.     IRRef ref = snap_ref(sn);
  845.     if (ref == ren || (LJ_SOFTFP && (sn & SNAP_SOFTFPNUM) && ++ref == ren)) {
  846.       IRIns *ir = IR(ref);
  847.       ra_spill(as, ir);  /* Register renamed, so force a spill slot. */
  848.       RA_DBGX((as, "snaprensp $f $s", ref, ir->s));
  849.       return 1;  /* Found. */
  850.     }
  851.   }
  852.   return 0;  /* Not found. */
  853. }
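
As an example of the rule above (hypothetical refs and registers): if a guard for snapshot #3 was already emitted with 0012 held in r5, and 0012 is later renamed (i.e. earlier in the machine code) to r7, a second guard for the same snapshot would report 0012 in r7. Both guards share one exit and thus one RegSP map, so the ref is forced into a spill slot and the offending rename is killed in asm_snap_prep() instead.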

  854. /* Prepare snapshot for next guard instruction. */
  855. static void asm_snap_prep(ASMState *as)
  856. {
  857.   if (as->curins < as->snapref) {
  858.     do {
  859.       if (as->snapno == 0) return;  /* Called by sunk stores before snap #0. */
  860.       as->snapno--;
  861.       as->snapref = as->T->snap[as->snapno].ref;
  862.     } while (as->curins < as->snapref);
  863.     asm_snap_alloc(as);
  864.     as->snaprename = as->T->nins;
  865.   } else {
  866.     /* Process any renames above the highwater mark. */
  867.     for (; as->snaprename < as->T->nins; as->snaprename++) {
  868.       IRIns *ir = IR(as->snaprename);
  869.       if (asm_snap_checkrename(as, ir->op1))
  870.         ir->op2 = REF_BIAS-1;  /* Kill rename. */
  871.     }
  872.   }
  873. }

  874. /* -- Miscellaneous helpers ----------------------------------------------- */

  875. /* Calculate stack adjustment. */
  876. static int32_t asm_stack_adjust(ASMState *as)
  877. {
  878.   if (as->evenspill <= SPS_FIXED)
  879.     return 0;
  880.   return sps_scale(sps_align(as->evenspill));
  881. }

  882. /* Must match with hash*() in lj_tab.c. */
  883. static uint32_t ir_khash(IRIns *ir)
  884. {
  885.   uint32_t lo, hi;
  886.   if (irt_isstr(ir->t)) {
  887.     return ir_kstr(ir)->hash;
  888.   } else if (irt_isnum(ir->t)) {
  889.     lo = ir_knum(ir)->u32.lo;
  890.     hi = ir_knum(ir)->u32.hi << 1;
  891.   } else if (irt_ispri(ir->t)) {
  892.     lua_assert(!irt_isnil(ir->t));
  893.     return irt_type(ir->t)-IRT_FALSE;
  894.   } else {
  895.     lua_assert(irt_isgcv(ir->t));
  896.     lo = u32ptr(ir_kgc(ir));
  897.     hi = lo + HASH_BIAS;
  898.   }
  899.   return hashrot(lo, hi);
  900. }

  901. /* -- Allocations --------------------------------------------------------- */

  902. static void asm_gencall(ASMState *as, const CCallInfo *ci, IRRef *args);
  903. static void asm_setupresult(ASMState *as, IRIns *ir, const CCallInfo *ci);

  904. static void asm_snew(ASMState *as, IRIns *ir)
  905. {
  906.   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_str_new];
  907.   IRRef args[3];
  908.   args[0] = ASMREF_L;  /* lua_State *L    */
  909.   args[1] = ir->op1;   /* const char *str */
  910.   args[2] = ir->op2;   /* size_t len      */
  911.   as->gcsteps++;
  912.   asm_setupresult(as, ir, ci);  /* GCstr * */
  913.   asm_gencall(as, ci, args);
  914. }

  915. static void asm_tnew(ASMState *as, IRIns *ir)
  916. {
  917.   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_new1];
  918.   IRRef args[2];
  919.   args[0] = ASMREF_L;     /* lua_State *L    */
  920.   args[1] = ASMREF_TMP1;  /* uint32_t ahsize */
  921.   as->gcsteps++;
  922.   asm_setupresult(as, ir, ci);  /* GCtab * */
  923.   asm_gencall(as, ci, args);
  924.   ra_allockreg(as, ir->op1 | (ir->op2 << 24), ra_releasetmp(as, ASMREF_TMP1));
  925. }

  926. static void asm_tdup(ASMState *as, IRIns *ir)
  927. {
  928.   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_dup];
  929.   IRRef args[2];
  930.   args[0] = ASMREF_L;  /* lua_State *L    */
  931.   args[1] = ir->op1;   /* const GCtab *kt */
  932.   as->gcsteps++;
  933.   asm_setupresult(as, ir, ci);  /* GCtab * */
  934.   asm_gencall(as, ci, args);
  935. }

  936. static void asm_gc_check(ASMState *as);

  937. /* Explicit GC step. */
  938. static void asm_gcstep(ASMState *as, IRIns *ir)
  939. {
  940.   IRIns *ira;
  941.   for (ira = IR(as->stopins+1); ira < ir; ira++)
  942.     if ((ira->o == IR_TNEW || ira->o == IR_TDUP ||
  943.          (LJ_HASFFI && (ira->o == IR_CNEW || ira->o == IR_CNEWI))) &&
  944.         ra_used(ira))
  945.       as->gcsteps++;
  946.   if (as->gcsteps)
  947.     asm_gc_check(as);
  948.   as->gcsteps = 0x80000000;  /* Prevent implicit GC check further up. */
  949. }

  950. /* -- Buffer operations --------------------------------------------------- */

  951. static void asm_tvptr(ASMState *as, Reg dest, IRRef ref);

  952. static void asm_bufhdr(ASMState *as, IRIns *ir)
  953. {
  954.   Reg sb = ra_dest(as, ir, RSET_GPR);
  955.   if ((ir->op2 & IRBUFHDR_APPEND)) {
  956.     /* Rematerialize const buffer pointer instead of likely spill. */
  957.     IRIns *irp = IR(ir->op1);
  958.     if (!(ra_hasreg(irp->r) || irp == ir-1 ||
  959.           (irp == ir-2 && !ra_used(ir-1)))) {
  960.       while (!(irp->o == IR_BUFHDR && !(irp->op2 & IRBUFHDR_APPEND)))
  961.         irp = IR(irp->op1);
  962.       if (irref_isk(irp->op1)) {
  963.         ra_weak(as, ra_allocref(as, ir->op1, RSET_GPR));
  964.         ir = irp;
  965.       }
  966.     }
  967.   } else {
  968.     Reg tmp = ra_scratch(as, rset_exclude(RSET_GPR, sb));
  969.     /* Passing ir isn't strictly correct, but it's an IRT_P32, too. */
  970.     emit_storeofs(as, ir, tmp, sb, offsetof(SBuf, p));
  971.     emit_loadofs(as, ir, tmp, sb, offsetof(SBuf, b));
  972.   }
  973. #if LJ_TARGET_X86ORX64
  974.   ra_left(as, sb, ir->op1);
  975. #else
  976.   ra_leftov(as, sb, ir->op1);
  977. #endif
  978. }

  979. static void asm_bufput(ASMState *as, IRIns *ir)
  980. {
  981.   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_putstr];
  982.   IRRef args[3];
  983.   IRIns *irs;
  984.   int kchar = -1;
  985.   args[0] = ir->op1;  /* SBuf * */
  986.   args[1] = ir->op2;  /* GCstr * */
  987.   irs = IR(ir->op2);
  988.   lua_assert(irt_isstr(irs->t));
  989.   if (irs->o == IR_KGC) {
  990.     GCstr *s = ir_kstr(irs);
  991.     if (s->len == 1) {  /* Optimize put of single-char string constant. */
  992.       kchar = strdata(s)[0];
  993.       args[1] = ASMREF_TMP1;  /* int, truncated to char */
  994.       ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
  995.     }
  996.   } else if (mayfuse(as, ir->op2) && ra_noreg(irs->r)) {
  997.     if (irs->o == IR_TOSTR) {  /* Fuse number to string conversions. */
  998.       if (irs->op2 == IRTOSTR_NUM) {
  999.         args[1] = ASMREF_TMP1;  /* TValue * */
  1000.         ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putnum];
  1001.       } else {
  1002.         lua_assert(irt_isinteger(IR(irs->op1)->t));
  1003.         args[1] = irs->op1;  /* int */
  1004.         if (irs->op2 == IRTOSTR_INT)
  1005.           ci = &lj_ir_callinfo[IRCALL_lj_strfmt_putint];
  1006.         else
  1007.           ci = &lj_ir_callinfo[IRCALL_lj_buf_putchar];
  1008.       }
  1009.     } else if (irs->o == IR_SNEW) {  /* Fuse string allocation. */
  1010.       args[1] = irs->op1;  /* const void * */
  1011.       args[2] = irs->op2;  /* MSize */
  1012.       ci = &lj_ir_callinfo[IRCALL_lj_buf_putmem];
  1013.     }
  1014.   }
  1015.   asm_setupresult(as, ir, ci);  /* SBuf * */
  1016.   asm_gencall(as, ci, args);
  1017.   if (args[1] == ASMREF_TMP1) {
  1018.     Reg tmp = ra_releasetmp(as, ASMREF_TMP1);
  1019.     if (kchar == -1)
  1020.       asm_tvptr(as, tmp, irs->op1);
  1021.     else
  1022.       ra_allockreg(as, kchar, tmp);
  1023.   }
  1024. }

  1025. static void asm_bufstr(ASMState *as, IRIns *ir)
  1026. {
  1027.   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_buf_tostr];
  1028.   IRRef args[1];
  1029.   args[0] = ir->op1;  /* SBuf *sb */
  1030.   as->gcsteps++;
  1031.   asm_setupresult(as, ir, ci);  /* GCstr * */
  1032.   asm_gencall(as, ci, args);
  1033. }

  1034. /* -- Type conversions ---------------------------------------------------- */

  1035. static void asm_tostr(ASMState *as, IRIns *ir)
  1036. {
  1037.   const CCallInfo *ci;
  1038.   IRRef args[2];
  1039.   args[0] = ASMREF_L;
  1040.   as->gcsteps++;
  1041.   if (ir->op2 == IRTOSTR_NUM) {
  1042.     args[1] = ASMREF_TMP1;  /* cTValue * */
  1043.     ci = &lj_ir_callinfo[IRCALL_lj_strfmt_num];
  1044.   } else {
  1045.     args[1] = ir->op1;  /* int32_t k */
  1046.     if (ir->op2 == IRTOSTR_INT)
  1047.       ci = &lj_ir_callinfo[IRCALL_lj_strfmt_int];
  1048.     else
  1049.       ci = &lj_ir_callinfo[IRCALL_lj_strfmt_char];
  1050.   }
  1051.   asm_setupresult(as, ir, ci);  /* GCstr * */
  1052.   asm_gencall(as, ci, args);
  1053.   if (ir->op2 == IRTOSTR_NUM)
  1054.     asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op1);
  1055. }

  1056. #if LJ_32 && LJ_HASFFI && !LJ_SOFTFP && !LJ_TARGET_X86
  1057. static void asm_conv64(ASMState *as, IRIns *ir)
  1058. {
  1059.   IRType st = (IRType)((ir-1)->op2 & IRCONV_SRCMASK);
  1060.   IRType dt = (((ir-1)->op2 & IRCONV_DSTMASK) >> IRCONV_DSH);
  1061.   IRCallID id;
  1062.   IRRef args[2];
  1063.   lua_assert((ir-1)->o == IR_CONV && ir->o == IR_HIOP);
  1064.   args[LJ_BE] = (ir-1)->op1;
  1065.   args[LJ_LE] = ir->op1;
  1066.   if (st == IRT_NUM || st == IRT_FLOAT) {
  1067.     id = IRCALL_fp64_d2l + ((st == IRT_FLOAT) ? 2 : 0) + (dt - IRT_I64);
  1068.     ir--;
  1069.   } else {
  1070.     id = IRCALL_fp64_l2d + ((dt == IRT_FLOAT) ? 2 : 0) + (st - IRT_I64);
  1071.   }
  1072.   {
  1073. #if LJ_TARGET_ARM && !LJ_ABI_SOFTFP
  1074.     CCallInfo cim = lj_ir_callinfo[id], *ci = &cim;
  1075.     cim.flags |= CCI_VARARG;  /* These calls don't use the hard-float ABI! */
  1076. #else
  1077.     const CCallInfo *ci = &lj_ir_callinfo[id];
  1078. #endif
  1079.     asm_setupresult(as, ir, ci);
  1080.     asm_gencall(as, ci, args);
  1081.   }
  1082. }
  1083. #endif

  1084. /* -- Memory references --------------------------------------------------- */

  1085. static void asm_newref(ASMState *as, IRIns *ir)
  1086. {
  1087.   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_lj_tab_newkey];
  1088.   IRRef args[3];
  1089.   if (ir->r == RID_SINK)
  1090.     return;
  1091.   args[0] = ASMREF_L;     /* lua_State *L */
  1092.   args[1] = ir->op1;      /* GCtab *t     */
  1093.   args[2] = ASMREF_TMP1;  /* cTValue *key */
  1094.   asm_setupresult(as, ir, ci);  /* TValue * */
  1095.   asm_gencall(as, ci, args);
  1096.   asm_tvptr(as, ra_releasetmp(as, ASMREF_TMP1), ir->op2);
  1097. }

  1098. static void asm_lref(ASMState *as, IRIns *ir)
  1099. {
  1100.   Reg r = ra_dest(as, ir, RSET_GPR);
  1101. #if LJ_TARGET_X86ORX64
  1102.   ra_left(as, r, ASMREF_L);
  1103. #else
  1104.   ra_leftov(as, r, ASMREF_L);
  1105. #endif
  1106. }

  1107. /* -- Calls --------------------------------------------------------------- */

  1108. /* Collect arguments from CALL* and CARG instructions. */
  1109. static void asm_collectargs(ASMState *as, IRIns *ir,
  1110.                             const CCallInfo *ci, IRRef *args)
  1111. {
  1112.   uint32_t n = CCI_XNARGS(ci);
  1113.   lua_assert(n <= CCI_NARGS_MAX*2);  /* Account for split args. */
  1114.   if ((ci->flags & CCI_L)) { *args++ = ASMREF_L; n--; }
  1115.   while (n-- > 1) {
  1116.     ir = IR(ir->op1);
  1117.     lua_assert(ir->o == IR_CARG);
  1118.     args[n] = ir->op2 == REF_NIL ? 0 : ir->op2;
  1119.   }
  1120.   args[0] = ir->op1 == REF_NIL ? 0 : ir->op1;
  1121.   lua_assert(IR(ir->op1)->o != IR_CARG);
  1122. }
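
For illustration, the IR shape asm_collectargs() walks for a three-argument call looks roughly like this (hypothetical refs):

/*
**   0010  CARG   a     b           first two arguments chained together
**   0011  CARG   0010  c
**   0012  CALLN  0011  <callinfo>  op1 points at the tail of the CARG chain
**
** Walking op1 backwards from the call fills args[] = { a, b, c } in order.
*/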

  1123. /* Reconstruct CCallInfo flags for CALLX*. */
  1124. static uint32_t asm_callx_flags(ASMState *as, IRIns *ir)
  1125. {
  1126.   uint32_t nargs = 0;
  1127.   if (ir->op1 != REF_NIL) {  /* Count number of arguments first. */
  1128.     IRIns *ira = IR(ir->op1);
  1129.     nargs++;
  1130.     while (ira->o == IR_CARG) { nargs++; ira = IR(ira->op1); }
  1131.   }
  1132. #if LJ_HASFFI
  1133.   if (IR(ir->op2)->o == IR_CARG) {  /* Copy calling convention info. */
  1134.     CTypeID id = (CTypeID)IR(IR(ir->op2)->op2)->i;
  1135.     CType *ct = ctype_get(ctype_ctsG(J2G(as->J)), id);
  1136.     nargs |= ((ct->info & CTF_VARARG) ? CCI_VARARG : 0);
  1137. #if LJ_TARGET_X86
  1138.     nargs |= (ctype_cconv(ct->info) << CCI_CC_SHIFT);
  1139. #endif
  1140.   }
  1141. #endif
  1142.   return (nargs | (ir->t.irt << CCI_OTSHIFT));
  1143. }

  1144. static void asm_callid(ASMState *as, IRIns *ir, IRCallID id)
  1145. {
  1146.   const CCallInfo *ci = &lj_ir_callinfo[id];
  1147.   IRRef args[2];
  1148.   args[0] = ir->op1;
  1149.   args[1] = ir->op2;
  1150.   asm_setupresult(as, ir, ci);
  1151.   asm_gencall(as, ci, args);
  1152. }

  1153. static void asm_call(ASMState *as, IRIns *ir)
  1154. {
  1155.   IRRef args[CCI_NARGS_MAX];
  1156.   const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  1157.   asm_collectargs(as, ir, ci, args);
  1158.   asm_setupresult(as, ir, ci);
  1159.   asm_gencall(as, ci, args);
  1160. }

  1161. #if !LJ_SOFTFP
  1162. static void asm_fppow(ASMState *as, IRIns *ir, IRRef lref, IRRef rref)
  1163. {
  1164.   const CCallInfo *ci = &lj_ir_callinfo[IRCALL_pow];
  1165.   IRRef args[2];
  1166.   args[0] = lref;
  1167.   args[1] = rref;
  1168.   asm_setupresult(as, ir, ci);
  1169.   asm_gencall(as, ci, args);
  1170. }

  1171. static int asm_fpjoin_pow(ASMState *as, IRIns *ir)
  1172. {
  1173.   IRIns *irp = IR(ir->op1);
  1174.   if (irp == ir-1 && irp->o == IR_MUL && !ra_used(irp)) {
  1175.     IRIns *irpp = IR(irp->op1);
  1176.     if (irpp == ir-2 && irpp->o == IR_FPMATH &&
  1177.         irpp->op2 == IRFPM_LOG2 && !ra_used(irpp)) {
  1178.       asm_fppow(as, ir, irpp->op1, irp->op2);
  1179.       return 1;
  1180.     }
  1181.   }
  1182.   return 0;
  1183. }
  1184. #endif

  1185. /* -- PHI and loop handling ----------------------------------------------- */

  1186. /* Break a PHI cycle by renaming to a free register (evict if needed). */
  1187. static void asm_phi_break(ASMState *as, RegSet blocked, RegSet blockedby,
  1188.                           RegSet allow)
  1189. {
  1190.   RegSet candidates = blocked & allow;
  1191.   if (candidates) {  /* If this register file has candidates. */
  1192.     /* Note: the set for ra_pick cannot be empty, since each register file
  1193.     ** has some registers never allocated to PHIs.
  1194.     */
  1195.     Reg down, up = ra_pick(as, ~blocked & allow);  /* Get a free register. */
  1196.     if (candidates & ~blockedby)  /* Optimize shifts, else it's a cycle. */
  1197.       candidates = candidates & ~blockedby;
  1198.     down = rset_picktop(candidates);  /* Pick candidate PHI register. */
  1199.     ra_rename(as, down, up);  /* And rename it to the free register. */
  1200.   }
  1201. }

  1202. /* PHI register shuffling.
  1203. **
  1204. ** The allocator tries hard to preserve PHI register assignments across
  1205. ** the loop body. Most of the time this loop does nothing, since there
  1206. ** are no register mismatches.
  1207. **
  1208. ** If a register mismatch is detected and ...
  1209. ** - the register is currently free: rename it.
  1210. ** - the register is blocked by an invariant: restore/remat and rename it.
  1211. ** - Otherwise the register is used by another PHI, so mark it as blocked.
  1212. **
  1213. ** The renames are order-sensitive, so just retry the loop if a register
  1214. ** is marked as blocked, but has been freed in the meantime. A cycle is
  1215. ** detected if all of the blocked registers are allocated. To break the
  1216. ** cycle rename one of them to a free register and retry.
  1217. **
  1218. ** Note that PHI spill slots are kept in sync and don't need to be shuffled.
  1219. */
  1220. static void asm_phi_shuffle(ASMState *as)
  1221. {
  1222.   RegSet work;

  1223.   /* Find and resolve PHI register mismatches. */
  1224.   for (;;) {
  1225.     RegSet blocked = RSET_EMPTY;
  1226.     RegSet blockedby = RSET_EMPTY;
  1227.     RegSet phiset = as->phiset;
  1228.     while (phiset) {  /* Check all left PHI operand registers. */
  1229.       Reg r = rset_pickbot(phiset);
  1230.       IRIns *irl = IR(as->phireg[r]);
  1231.       Reg left = irl->r;
  1232.       if (r != left) {  /* Mismatch? */
  1233.         if (!rset_test(as->freeset, r)) {  /* PHI register blocked? */
  1234.           IRRef ref = regcost_ref(as->cost[r]);
  1235.           /* Blocked by other PHI (w/reg)? */
  1236.           if (!ra_iskref(ref) && irt_ismarked(IR(ref)->t)) {
  1237.             rset_set(blocked, r);
  1238.             if (ra_hasreg(left))
  1239.               rset_set(blockedby, left);
  1240.             left = RID_NONE;
  1241.           } else {  /* Otherwise grab register from invariant. */
  1242.             ra_restore(as, ref);
  1243.             checkmclim(as);
  1244.           }
  1245.         }
  1246.         if (ra_hasreg(left)) {
  1247.           ra_rename(as, left, r);
  1248.           checkmclim(as);
  1249.         }
  1250.       }
  1251.       rset_clear(phiset, r);
  1252.     }
  1253.     if (!blocked) break;  /* Finished. */
  1254.     if (!(as->freeset & blocked)) {  /* Break cycles if none are free. */
  1255.       asm_phi_break(as, blocked, blockedby, RSET_GPR);
  1256.       if (!LJ_SOFTFP) asm_phi_break(as, blocked, blockedby, RSET_FPR);
  1257.       checkmclim(as);
  1258.     }  /* Else retry some more renames. */
  1259.   }

  1260.   /* Restore/remat invariants whose registers are modified inside the loop. */
  1261. #if !LJ_SOFTFP
  1262.   work = as->modset & ~(as->freeset | as->phiset) & RSET_FPR;
  1263.   while (work) {
  1264.     Reg r = rset_pickbot(work);
  1265.     ra_restore(as, regcost_ref(as->cost[r]));
  1266.     rset_clear(work, r);
  1267.     checkmclim(as);
  1268.   }
  1269. #endif
  1270.   work = as->modset & ~(as->freeset | as->phiset);
  1271.   while (work) {
  1272.     Reg r = rset_pickbot(work);
  1273.     ra_restore(as, regcost_ref(as->cost[r]));
  1274.     rset_clear(work, r);
  1275.     checkmclim(as);
  1276.   }

  1277.   /* Allocate and save all unsaved PHI regs and clear marks. */
  1278.   work = as->phiset;
  1279.   while (work) {
  1280.     Reg r = rset_picktop(work);
  1281.     IRRef lref = as->phireg[r];
  1282.     IRIns *ir = IR(lref);
  1283.     if (ra_hasspill(ir->s)) {  /* Left PHI gained a spill slot? */
  1284.       irt_clearmark(ir->t);  /* Handled here, so clear marker now. */
  1285.       ra_alloc1(as, lref, RID2RSET(r));
  1286.       ra_save(as, ir, r);  /* Save to spill slot inside the loop. */
  1287.       checkmclim(as);
  1288.     }
  1289.     rset_clear(work, r);
  1290.   }
  1291. }
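
A minimal cycle for the mismatch loop above (hypothetical registers): PHI x is assigned r1 but its left value currently sits in r2, while PHI y is assigned r2 with its left value in r1. Neither r1 nor r2 is free and each is blocked by the other PHI, so asm_phi_break() renames one of them into a free register; the retried pass then resolves both mismatches with ordinary renames.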

  1292. /* Copy unsynced left/right PHI spill slots. Rarely needed. */
  1293. static void asm_phi_copyspill(ASMState *as)
  1294. {
  1295.   int need = 0;
  1296.   IRIns *ir;
  1297.   for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--)
  1298.     if (ra_hasspill(ir->s) && ra_hasspill(IR(ir->op1)->s))
  1299.       need |= irt_isfp(ir->t) ? 2 : 1;  /* Unsynced spill slot? */
  1300.   if ((need & 1)) {  /* Copy integer spill slots. */
  1301. #if !LJ_TARGET_X86ORX64
  1302.     Reg r = RID_TMP;
  1303. #else
  1304.     Reg r = RID_RET;
  1305.     if ((as->freeset & RSET_GPR))
  1306.       r = rset_pickbot((as->freeset & RSET_GPR));
  1307.     else
  1308.       emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  1309. #endif
  1310.     for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
  1311.       if (ra_hasspill(ir->s)) {
  1312.         IRIns *irl = IR(ir->op1);
  1313.         if (ra_hasspill(irl->s) && !irt_isfp(ir->t)) {
  1314.           emit_spstore(as, irl, r, sps_scale(irl->s));
  1315.           emit_spload(as, ir, r, sps_scale(ir->s));
  1316.           checkmclim(as);
  1317.         }
  1318.       }
  1319.     }
  1320. #if LJ_TARGET_X86ORX64
  1321.     if (!rset_test(as->freeset, r))
  1322.       emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  1323. #endif
  1324.   }
  1325. #if !LJ_SOFTFP
  1326.   if ((need & 2)) {  /* Copy FP spill slots. */
  1327. #if LJ_TARGET_X86
  1328.     Reg r = RID_XMM0;
  1329. #else
  1330.     Reg r = RID_FPRET;
  1331. #endif
  1332.     if ((as->freeset & RSET_FPR))
  1333.       r = rset_pickbot((as->freeset & RSET_FPR));
  1334.     if (!rset_test(as->freeset, r))
  1335.       emit_spload(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  1336.     for (ir = IR(as->orignins-1); ir->o == IR_PHI; ir--) {
  1337.       if (ra_hasspill(ir->s)) {
  1338.         IRIns *irl = IR(ir->op1);
  1339.         if (ra_hasspill(irl->s) && irt_isfp(ir->t)) {
  1340.           emit_spstore(as, irl, r, sps_scale(irl->s));
  1341.           emit_spload(as, ir, r, sps_scale(ir->s));
  1342.           checkmclim(as);
  1343.         }
  1344.       }
  1345.     }
  1346.     if (!rset_test(as->freeset, r))
  1347.       emit_spstore(as, IR(regcost_ref(as->cost[r])), r, SPOFS_TMP);
  1348.   }
  1349. #endif
  1350. }

  1351. /* Emit renames for left PHIs which are only spilled outside the loop. */
  1352. static void asm_phi_fixup(ASMState *as)
  1353. {
  1354.   RegSet work = as->phiset;
  1355.   while (work) {
  1356.     Reg r = rset_picktop(work);
  1357.     IRRef lref = as->phireg[r];
  1358.     IRIns *ir = IR(lref);
  1359.     if (irt_ismarked(ir->t)) {
  1360.       irt_clearmark(ir->t);
  1361.       /* Left PHI gained a spill slot before the loop? */
  1362.       if (ra_hasspill(ir->s)) {
  1363.         IRRef ren;
  1364.         lj_ir_set(as->J, IRT(IR_RENAME, IRT_NIL), lref, as->loopsnapno);
  1365.         ren = tref_ref(lj_ir_emit(as->J));
  1366.         as->ir = as->T->ir;  /* The IR may have been reallocated. */
  1367.         IR(ren)->r = (uint8_t)r;
  1368.         IR(ren)->s = SPS_NONE;
  1369.       }
  1370.     }
  1371.     rset_clear(work, r);
  1372.   }
  1373. }

  1374. /* Setup right PHI reference. */
  1375. static void asm_phi(ASMState *as, IRIns *ir)
  1376. {
  1377.   RegSet allow = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) &
  1378.                  ~as->phiset;
  1379.   RegSet afree = (as->freeset & allow);
  1380.   IRIns *irl = IR(ir->op1);
  1381.   IRIns *irr = IR(ir->op2);
  1382.   if (ir->r == RID_SINK)  /* Sink PHI. */
  1383.     return;
  1384.   /* Spill slot shuffling is not implemented yet (but rarely needed). */
  1385.   if (ra_hasspill(irl->s) || ra_hasspill(irr->s))
  1386.     lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  1387.   /* Leave at least one register free for non-PHIs (and PHI cycle breaking). */
  1388.   if ((afree & (afree-1))) {  /* Two or more free registers? */
  1389.     Reg r;
  1390.     if (ra_noreg(irr->r)) {  /* Get a register for the right PHI. */
  1391.       r = ra_allocref(as, ir->op2, allow);
  1392.     } else {  /* Duplicate right PHI, need a copy (rare). */
  1393.       r = ra_scratch(as, allow);
  1394.       emit_movrr(as, irr, r, irr->r);
  1395.     }
  1396.     ir->r = (uint8_t)r;
  1397.     rset_set(as->phiset, r);
  1398.     as->phireg[r] = (IRRef1)ir->op1;
  1399.     irt_setmark(irl->t);  /* Marks left PHIs _with_ register. */
  1400.     if (ra_noreg(irl->r))
  1401.       ra_sethint(irl->r, r); /* Set register hint for left PHI. */
  1402.   } else {  /* Otherwise allocate a spill slot. */
  1403.     /* This is overly restrictive, but it triggers only on synthetic code. */
  1404.     if (ra_hasreg(irl->r) || ra_hasreg(irr->r))
  1405.       lj_trace_err(as->J, LJ_TRERR_NYIPHI);
  1406.     ra_spill(as, ir);
  1407.     irr->s = ir->s;  /* Set right PHI spill slot. Sync left slot later. */
  1408.   }
  1409. }
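
(Editor's aside, not part of lj_asm.c: the `(afree & (afree-1))` test in asm_phi() is a classic bit trick. Subtracting 1 clears the lowest set bit and sets the bits below it, so x & (x-1) removes the lowest set bit of x; the result is non-zero exactly when at least two bits, i.e. at least two free registers, are present. A standalone illustration:)

#include <stdio.h>
#include <stdint.h>

typedef uint32_t RegSet;

static int has_two_or_more(RegSet rs)
{
  return (rs & (rs - 1)) != 0;  /* Clears the lowest set bit; non-zero iff >= 2 bits set. */
}

int main(void)
{
  printf("%d\n", has_two_or_more(0x00u));  /* 0: empty set */
  printf("%d\n", has_two_or_more(0x08u));  /* 0: exactly one register */
  printf("%d\n", has_two_or_more(0x28u));  /* 1: two registers */
  return 0;
}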

  1410. static void asm_loop_fixup(ASMState *as);

  1411. /* Middle part of a loop. */
  1412. static void asm_loop(ASMState *as)
  1413. {
  1414.   MCode *mcspill;
  1415.   /* LOOP is a guard, so the snapno is up to date. */
  1416.   as->loopsnapno = as->snapno;
  1417.   if (as->gcsteps)
  1418.     asm_gc_check(as);
  1419.   /* LOOP marks the transition from the variant to the invariant part. */
  1420.   as->flagmcp = as->invmcp = NULL;
  1421.   as->sectref = 0;
  1422.   if (!neverfuse(as)) as->fuseref = 0;
  1423.   asm_phi_shuffle(as);
  1424.   mcspill = as->mcp;
  1425.   asm_phi_copyspill(as);
  1426.   asm_loop_fixup(as);
  1427.   as->mcloop = as->mcp;
  1428.   RA_DBGX((as, "===== LOOP ====="));
  1429.   if (!as->realign) RA_DBG_FLUSH();
  1430.   if (as->mcp != mcspill)
  1431.     emit_jmp(as, mcspill);
  1432. }

  1433. /* -- Target-specific assembler ------------------------------------------- */

  1434. #if LJ_TARGET_X86ORX64
  1435. #include "lj_asm_x86.h"
  1436. #elif LJ_TARGET_ARM
  1437. #include "lj_asm_arm.h"
  1438. #elif LJ_TARGET_PPC
  1439. #include "lj_asm_ppc.h"
  1440. #elif LJ_TARGET_MIPS
  1441. #include "lj_asm_mips.h"
  1442. #else
  1443. #error "Missing assembler for target CPU"
  1444. #endif

  1445. /* -- Instruction dispatch ------------------------------------------------ */

  1446. /* Assemble a single instruction. */
  1447. static void asm_ir(ASMState *as, IRIns *ir)
  1448. {
  1449.   switch ((IROp)ir->o) {
  1450.   /* Miscellaneous ops. */
  1451.   case IR_LOOP: asm_loop(as); break;
  1452.   case IR_NOP: case IR_XBAR: lua_assert(!ra_used(ir)); break;
  1453.   case IR_USE:
  1454.     ra_alloc1(as, ir->op1, irt_isfp(ir->t) ? RSET_FPR : RSET_GPR); break;
  1455.   case IR_PHI: asm_phi(as, ir); break;
  1456.   case IR_HIOP: asm_hiop(as, ir); break;
  1457.   case IR_GCSTEP: asm_gcstep(as, ir); break;
  1458.   case IR_PROF: asm_prof(as, ir); break;

  1459.   /* Guarded assertions. */
  1460.   case IR_LT: case IR_GE: case IR_LE: case IR_GT:
  1461.   case IR_ULT: case IR_UGE: case IR_ULE: case IR_UGT:
  1462.   case IR_ABC:
  1463.     asm_comp(as, ir);
  1464.     break;
  1465.   case IR_EQ: case IR_NE:
  1466.     if ((ir-1)->o == IR_HREF && ir->op1 == as->curins-1) {
  1467.       as->curins--;
  1468.       asm_href(as, ir-1, (IROp)ir->o);
  1469.     } else {
  1470.       asm_equal(as, ir);
  1471.     }
  1472.     break;

  1473.   case IR_RETF: asm_retf(as, ir); break;

  1474.   /* Bit ops. */
  1475.   case IR_BNOT: asm_bnot(as, ir); break;
  1476.   case IR_BSWAP: asm_bswap(as, ir); break;
  1477.   case IR_BAND: asm_band(as, ir); break;
  1478.   case IR_BOR: asm_bor(as, ir); break;
  1479.   case IR_BXOR: asm_bxor(as, ir); break;
  1480.   case IR_BSHL: asm_bshl(as, ir); break;
  1481.   case IR_BSHR: asm_bshr(as, ir); break;
  1482.   case IR_BSAR: asm_bsar(as, ir); break;
  1483.   case IR_BROL: asm_brol(as, ir); break;
  1484.   case IR_BROR: asm_bror(as, ir); break;

  1485.   /* Arithmetic ops. */
  1486.   case IR_ADD: asm_add(as, ir); break;
  1487.   case IR_SUB: asm_sub(as, ir); break;
  1488.   case IR_MUL: asm_mul(as, ir); break;
  1489.   case IR_DIV: asm_div(as, ir); break;
  1490.   case IR_MOD: asm_mod(as, ir); break;
  1491.   case IR_POW: asm_pow(as, ir); break;
  1492.   case IR_NEG: asm_neg(as, ir); break;
  1493.   case IR_ABS: asm_abs(as, ir); break;
  1494.   case IR_ATAN2: asm_atan2(as, ir); break;
  1495.   case IR_LDEXP: asm_ldexp(as, ir); break;
  1496.   case IR_MIN: asm_min(as, ir); break;
  1497.   case IR_MAX: asm_max(as, ir); break;
  1498.   case IR_FPMATH: asm_fpmath(as, ir); break;

  1499.   /* Overflow-checking arithmetic ops. */
  1500.   case IR_ADDOV: asm_addov(as, ir); break;
  1501.   case IR_SUBOV: asm_subov(as, ir); break;
  1502.   case IR_MULOV: asm_mulov(as, ir); break;

  1503.   /* Memory references. */
  1504.   case IR_AREF: asm_aref(as, ir); break;
  1505.   case IR_HREF: asm_href(as, ir, 0); break;
  1506.   case IR_HREFK: asm_hrefk(as, ir); break;
  1507.   case IR_NEWREF: asm_newref(as, ir); break;
  1508.   case IR_UREFO: case IR_UREFC: asm_uref(as, ir); break;
  1509.   case IR_FREF: asm_fref(as, ir); break;
  1510.   case IR_STRREF: asm_strref(as, ir); break;
  1511.   case IR_LREF: asm_lref(as, ir); break;

  1512.   /* Loads and stores. */
  1513.   case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  1514.     asm_ahuvload(as, ir);
  1515.     break;
  1516.   case IR_FLOAD: asm_fload(as, ir); break;
  1517.   case IR_XLOAD: asm_xload(as, ir); break;
  1518.   case IR_SLOAD: asm_sload(as, ir); break;

  1519.   case IR_ASTORE: case IR_HSTORE: case IR_USTORE: asm_ahustore(as, ir); break;
  1520.   case IR_FSTORE: asm_fstore(as, ir); break;
  1521.   case IR_XSTORE: asm_xstore(as, ir); break;

  1522.   /* Allocations. */
  1523.   case IR_SNEW: case IR_XSNEW: asm_snew(as, ir); break;
  1524.   case IR_TNEW: asm_tnew(as, ir); break;
  1525.   case IR_TDUP: asm_tdup(as, ir); break;
  1526.   case IR_CNEW: case IR_CNEWI: asm_cnew(as, ir); break;

  1527.   /* Buffer operations. */
  1528.   case IR_BUFHDR: asm_bufhdr(as, ir); break;
  1529.   case IR_BUFPUT: asm_bufput(as, ir); break;
  1530.   case IR_BUFSTR: asm_bufstr(as, ir); break;

  1531.   /* Write barriers. */
  1532.   case IR_TBAR: asm_tbar(as, ir); break;
  1533.   case IR_OBAR: asm_obar(as, ir); break;

  1534.   /* Type conversions. */
  1535.   case IR_TOBIT: asm_tobit(as, ir); break;
  1536.   case IR_CONV: asm_conv(as, ir); break;
  1537.   case IR_TOSTR: asm_tostr(as, ir); break;
  1538.   case IR_STRTO: asm_strto(as, ir); break;

  1539.   /* Calls. */
  1540.   case IR_CALLA:
  1541.     as->gcsteps++;
  1542.     /* fallthrough */
  1543.   case IR_CALLN: case IR_CALLL: case IR_CALLS: asm_call(as, ir); break;
  1544.   case IR_CALLXS: asm_callx(as, ir); break;
  1545.   case IR_CARG: break;

  1546.   default:
  1547.     setintV(&as->J->errinfo, ir->o);
  1548.     lj_trace_err_info(as->J, LJ_TRERR_NYIIR);
  1549.     break;
  1550.   }
  1551. }

  1552. /* -- Head of trace ------------------------------------------------------- */

  1553. /* Head of a root trace. */
  1554. static void asm_head_root(ASMState *as)
  1555. {
  1556.   int32_t spadj;
  1557.   asm_head_root_base(as);
  1558.   emit_setvmstate(as, (int32_t)as->T->traceno);
  1559.   spadj = asm_stack_adjust(as);
  1560.   as->T->spadjust = (uint16_t)spadj;
  1561.   emit_spsub(as, spadj);
  1562.   /* Root traces assume a checked stack for the starting proto. */
  1563.   as->T->topslot = gcref(as->T->startpt)->pt.framesize;
  1564. }

  1565. /* Head of a side trace.
  1566. **
  1567. ** The current simplistic algorithm requires that all slots inherited
  1568. ** from the parent are live in a register between pass 2 and pass 3. This
  1569. ** avoids the complexity of stack slot shuffling. But of course this may
  1570. ** overflow the register set in some cases and cause the dreaded error:
  1571. ** "NYI: register coalescing too complex". A refined algorithm is needed.
  1572. */
  1573. static void asm_head_side(ASMState *as)
  1574. {
  1575.   IRRef1 sloadins[RID_MAX];
  1576.   RegSet allow = RSET_ALL;  /* Inverse of all coalesced registers. */
  1577.   RegSet live = RSET_EMPTY;  /* Live parent registers. */
  1578.   IRIns *irp = &as->parent->ir[REF_BASE];  /* Parent base. */
  1579.   int32_t spadj, spdelta;
  1580.   int pass2 = 0;
  1581.   int pass3 = 0;
  1582.   IRRef i;

  1583.   if (as->snapno && as->topslot > as->parent->topslot) {
  1584.     /* Force snap #0 alloc to prevent register overwrite in stack check. */
  1585.     as->snapno = 0;
  1586.     asm_snap_alloc(as);
  1587.   }
  1588.   allow = asm_head_side_base(as, irp, allow);

  1589.   /* Scan all parent SLOADs and collect register dependencies. */
  1590.   for (i = as->stopins; i > REF_BASE; i--) {
  1591.     IRIns *ir = IR(i);
  1592.     RegSP rs;
  1593.     lua_assert((ir->o == IR_SLOAD && (ir->op2 & IRSLOAD_PARENT)) ||
  1594.                (LJ_SOFTFP && ir->o == IR_HIOP) || ir->o == IR_PVAL);
  1595.     rs = as->parentmap[i - REF_FIRST];
  1596.     if (ra_hasreg(ir->r)) {
  1597.       rset_clear(allow, ir->r);
  1598.       if (ra_hasspill(ir->s)) {
  1599.         ra_save(as, ir, ir->r);
  1600.         checkmclim(as);
  1601.       }
  1602.     } else if (ra_hasspill(ir->s)) {
  1603.       irt_setmark(ir->t);
  1604.       pass2 = 1;
  1605.     }
  1606.     if (ir->r == rs) {  /* Coalesce matching registers right now. */
  1607.       ra_free(as, ir->r);
  1608.     } else if (ra_hasspill(regsp_spill(rs))) {
  1609.       if (ra_hasreg(ir->r))
  1610.         pass3 = 1;
  1611.     } else if (ra_used(ir)) {
  1612.       sloadins[rs] = (IRRef1)i;
  1613.       rset_set(live, rs);  /* Block live parent register. */
  1614.     }
  1615.   }

  1616.   /* Calculate stack frame adjustment. */
  1617.   spadj = asm_stack_adjust(as);
  1618.   spdelta = spadj - (int32_t)as->parent->spadjust;
  1619.   if (spdelta < 0) {  /* Don't shrink the stack frame. */
  1620.     spadj = (int32_t)as->parent->spadjust;
  1621.     spdelta = 0;
  1622.   }
  1623.   as->T->spadjust = (uint16_t)spadj;

  1624.   /* Reload spilled target registers. */
  1625.   if (pass2) {
  1626.     for (i = as->stopins; i > REF_BASE; i--) {
  1627.       IRIns *ir = IR(i);
  1628.       if (irt_ismarked(ir->t)) {
  1629.         RegSet mask;
  1630.         Reg r;
  1631.         RegSP rs;
  1632.         irt_clearmark(ir->t);
  1633.         rs = as->parentmap[i - REF_FIRST];
  1634.         if (!ra_hasspill(regsp_spill(rs)))
  1635.           ra_sethint(ir->r, rs);  /* Hint may be gone, set it again. */
  1636.         else if (sps_scale(regsp_spill(rs))+spdelta == sps_scale(ir->s))
  1637.           continue;  /* Same spill slot, do nothing. */
  1638.         mask = ((!LJ_SOFTFP && irt_isfp(ir->t)) ? RSET_FPR : RSET_GPR) & allow;
  1639.         if (mask == RSET_EMPTY)
  1640.           lj_trace_err(as->J, LJ_TRERR_NYICOAL);
  1641.         r = ra_allocref(as, i, mask);
  1642.         ra_save(as, ir, r);
  1643.         rset_clear(allow, r);
  1644.         if (r == rs) {  /* Coalesce matching registers right now. */
  1645.           ra_free(as, r);
  1646.           rset_clear(live, r);
  1647.         } else if (ra_hasspill(regsp_spill(rs))) {
  1648.           pass3 = 1;
  1649.         }
  1650.         checkmclim(as);
  1651.       }
  1652.     }
  1653.   }

  1654.   /* Store trace number and adjust stack frame relative to the parent. */
  1655.   emit_setvmstate(as, (int32_t)as->T->traceno);
  1656.   emit_spsub(as, spdelta);

  1657. #if !LJ_TARGET_X86ORX64
  1658.   /* Restore BASE register from parent spill slot. */
  1659.   if (ra_hasspill(irp->s))
  1660.     emit_spload(as, IR(REF_BASE), IR(REF_BASE)->r, sps_scale(irp->s));
  1661. #endif

  1662.   /* Restore target registers from parent spill slots. */
  1663.   if (pass3) {
  1664.     RegSet work = ~as->freeset & RSET_ALL;
  1665.     while (work) {
  1666.       Reg r = rset_pickbot(work);
  1667.       IRRef ref = regcost_ref(as->cost[r]);
  1668.       RegSP rs = as->parentmap[ref - REF_FIRST];
  1669.       rset_clear(work, r);
  1670.       if (ra_hasspill(regsp_spill(rs))) {
  1671.         int32_t ofs = sps_scale(regsp_spill(rs));
  1672.         ra_free(as, r);
  1673.         emit_spload(as, IR(ref), r, ofs);
  1674.         checkmclim(as);
  1675.       }
  1676.     }
  1677.   }

  1678.   /* Shuffle registers to match up target regs with parent regs. */
  1679.   for (;;) {
  1680.     RegSet work;

  1681.     /* Repeatedly coalesce free live registers by moving to their target. */
  1682.     while ((work = as->freeset & live) != RSET_EMPTY) {
  1683.       Reg rp = rset_pickbot(work);
  1684.       IRIns *ir = IR(sloadins[rp]);
  1685.       rset_clear(live, rp);
  1686.       rset_clear(allow, rp);
  1687.       ra_free(as, ir->r);
  1688.       emit_movrr(as, ir, ir->r, rp);
  1689.       checkmclim(as);
  1690.     }

  1691.     /* We're done if no live registers remain. */
  1692.     if (live == RSET_EMPTY)
  1693.       break;

  1694.     /* Break cycles by renaming one target to a temp. register. */
  1695.     if (live & RSET_GPR) {
  1696.       RegSet tmpset = as->freeset & ~live & allow & RSET_GPR;
  1697.       if (tmpset == RSET_EMPTY)
  1698.         lj_trace_err(as->J, LJ_TRERR_NYICOAL);
  1699.       ra_rename(as, rset_pickbot(live & RSET_GPR), rset_pickbot(tmpset));
  1700.     }
  1701.     if (!LJ_SOFTFP && (live & RSET_FPR)) {
  1702.       RegSet tmpset = as->freeset & ~live & allow & RSET_FPR;
  1703.       if (tmpset == RSET_EMPTY)
  1704.         lj_trace_err(as->J, LJ_TRERR_NYICOAL);
  1705.       ra_rename(as, rset_pickbot(live & RSET_FPR), rset_pickbot(tmpset));
  1706.     }
  1707.     checkmclim(as);
  1708.     /* Continue with coalescing to fix up the broken cycle(s). */
  1709.   }

  1710.   /* Inherit top stack slot already checked by parent trace. */
  1711.   as->T->topslot = as->parent->topslot;
  1712.   if (as->topslot > as->T->topslot) {  /* Need to check for higher slot? */
  1713. #ifdef EXITSTATE_CHECKEXIT
  1714.     /* Highest exit + 1 indicates stack check. */
  1715.     ExitNo exitno = as->T->nsnap;
  1716. #else
  1717.     /* Reuse the parent exit in the context of the parent trace. */
  1718.     ExitNo exitno = as->J->exitno;
  1719. #endif
  1720.     as->T->topslot = (uint8_t)as->topslot;  /* Remember for child traces. */
  1721.     asm_stack_check(as, as->topslot, irp, allow & RSET_GPR, exitno);
  1722.   }
  1723. }
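
(Editor's aside, not part of lj_asm.c: the shuffle loop at the end of asm_head_side() first coalesces every value whose target register is already free with a plain move; once only cycles remain, ra_rename() parks one register of a cycle in a free temporary, which turns that cycle into a chain of ordinary moves. A minimal standalone sketch of breaking a three-register cycle through one temporary, with illustrative values:)

#include <stdio.h>

int main(void)
{
  int r0 = 10, r1 = 11, r2 = 12;  /* Current register contents. */
  int tmp;                        /* A free register outside the cycle. */

  /* Required parallel moves: r0 -> r1, r1 -> r2, r2 -> r0 (a cycle). */
  tmp = r2;  /* Park one value to break the cycle... */
  r2 = r1;   /* ...now the remaining moves form an acyclic chain. */
  r1 = r0;
  r0 = tmp;

  printf("r0=%d r1=%d r2=%d\n", r0, r1, r2);  /* Prints: r0=12 r1=10 r2=11 */
  return 0;
}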

  1724. /* -- Tail of trace ------------------------------------------------------- */

  1725. /* Get base slot for a snapshot. */
  1726. static BCReg asm_baseslot(ASMState *as, SnapShot *snap, int *gotframe)
  1727. {
  1728.   SnapEntry *map = &as->T->snapmap[snap->mapofs];
  1729.   MSize n;
  1730.   for (n = snap->nent; n > 0; n--) {
  1731.     SnapEntry sn = map[n-1];
  1732.     if ((sn & SNAP_FRAME)) {
  1733.       *gotframe = 1;
  1734.       return snap_slot(sn);
  1735.     }
  1736.   }
  1737.   return 0;
  1738. }

  1739. /* Link to another trace. */
  1740. static void asm_tail_link(ASMState *as)
  1741. {
  1742.   SnapNo snapno = as->T->nsnap-1;  /* Last snapshot. */
  1743.   SnapShot *snap = &as->T->snap[snapno];
  1744.   int gotframe = 0;
  1745.   BCReg baseslot = asm_baseslot(as, snap, &gotframe);

  1746.   as->topslot = snap->topslot;
  1747.   checkmclim(as);
  1748.   ra_allocref(as, REF_BASE, RID2RSET(RID_BASE));

  1749.   if (as->T->link == 0) {
  1750.     /* Setup fixed registers for exit to interpreter. */
  1751.     const BCIns *pc = snap_pc(as->T->snapmap[snap->mapofs + snap->nent]);
  1752.     int32_t mres;
  1753.     if (bc_op(*pc) == BC_JLOOP) {  /* NYI: find a better way to do this. */
  1754.       BCIns *retpc = &traceref(as->J, bc_d(*pc))->startins;
  1755.       if (bc_isret(bc_op(*retpc)))
  1756.         pc = retpc;
  1757.     }
  1758.     ra_allockreg(as, i32ptr(J2GG(as->J)->dispatch), RID_DISPATCH);
  1759.     ra_allockreg(as, i32ptr(pc), RID_LPC);
  1760.     mres = (int32_t)(snap->nslots - baseslot);
  1761.     switch (bc_op(*pc)) {
  1762.     case BC_CALLM: case BC_CALLMT:
  1763.       mres -= (int32_t)(1 + LJ_FR2 + bc_a(*pc) + bc_c(*pc)); break;
  1764.     case BC_RETM: mres -= (int32_t)(bc_a(*pc) + bc_d(*pc)); break;
  1765.     case BC_TSETM: mres -= (int32_t)bc_a(*pc); break;
  1766.     default: if (bc_op(*pc) < BC_FUNCF) mres = 0; break;
  1767.     }
  1768.     ra_allockreg(as, mres, RID_RET);  /* Return MULTRES or 0. */
  1769.   } else if (baseslot) {
  1770.     /* Save modified BASE for linking to trace with higher start frame. */
  1771.     emit_setgl(as, RID_BASE, jit_base);
  1772.   }
  1773.   emit_addptr(as, RID_BASE, 8*(int32_t)baseslot);

  1774.   /* Sync the interpreter state with the on-trace state. */
  1775.   asm_stack_restore(as, snap);

  1776.   /* Root traces that add frames need to check the stack at the end. */
  1777.   if (!as->parent && gotframe)
  1778.     asm_stack_check(as, as->topslot, NULL, as->freeset & RSET_GPR, snapno);
  1779. }

  1780. /* -- Trace setup --------------------------------------------------------- */

  1781. /* Clear reg/sp for all instructions and add register hints. */
  1782. static void asm_setup_regsp(ASMState *as)
  1783. {
  1784.   GCtrace *T = as->T;
  1785.   int sink = T->sinktags;
  1786.   IRRef nins = T->nins;
  1787.   IRIns *ir, *lastir;
  1788.   int inloop;
  1789. #if LJ_TARGET_ARM
  1790.   uint32_t rload = 0xa6402a64;
  1791. #endif

  1792.   ra_setup(as);

  1793.   /* Clear reg/sp for constants. */
  1794.   for (ir = IR(T->nk), lastir = IR(REF_BASE); ir < lastir; ir++)
  1795.     ir->prev = REGSP_INIT;

  1796.   /* REF_BASE is used for implicit references to the BASE register. */
  1797.   lastir->prev = REGSP_HINT(RID_BASE);

  1798.   ir = IR(nins-1);
  1799.   if (ir->o == IR_RENAME) {
  1800.     do { ir--; nins--; } while (ir->o == IR_RENAME);
  1801.     T->nins = nins;  /* Remove any renames left over from ASM restart. */
  1802.   }
  1803.   as->snaprename = nins;
  1804.   as->snapref = nins;
  1805.   as->snapno = T->nsnap;

  1806.   as->stopins = REF_BASE;
  1807.   as->orignins = nins;
  1808.   as->curins = nins;

  1809.   /* Setup register hints for parent link instructions. */
  1810.   ir = IR(REF_FIRST);
  1811.   if (as->parent) {
  1812.     uint16_t *p;
  1813.     lastir = lj_snap_regspmap(as->parent, as->J->exitno, ir);
  1814.     if (lastir - ir > LJ_MAX_JSLOTS)
  1815.       lj_trace_err(as->J, LJ_TRERR_NYICOAL);
  1816.     as->stopins = (IRRef)((lastir-1) - as->ir);
  1817.     for (p = as->parentmap; ir < lastir; ir++) {
  1818.       RegSP rs = ir->prev;
  1819.       *p++ = (uint16_t)rs;  /* Copy original parent RegSP to parentmap. */
  1820.       if (!ra_hasspill(regsp_spill(rs)))
  1821.         ir->prev = (uint16_t)REGSP_HINT(regsp_reg(rs));
  1822.       else
  1823.         ir->prev = REGSP_INIT;
  1824.     }
  1825.   }

  1826.   inloop = 0;
  1827.   as->evenspill = SPS_FIRST;
  1828.   for (lastir = IR(nins); ir < lastir; ir++) {
  1829.     if (sink) {
  1830.       if (ir->r == RID_SINK)
  1831.         continue;
  1832.       if (ir->r == RID_SUNK) {  /* Revert after ASM restart. */
  1833.         ir->r = RID_SINK;
  1834.         continue;
  1835.       }
  1836.     }
  1837.     switch (ir->o) {
  1838.     case IR_LOOP:
  1839.       inloop = 1;
  1840.       break;
  1841. #if LJ_TARGET_ARM
  1842.     case IR_SLOAD:
  1843.       if (!((ir->op2 & IRSLOAD_TYPECHECK) || (ir+1)->o == IR_HIOP))
  1844.         break;
  1845.       /* fallthrough */
  1846.     case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  1847.       if (!LJ_SOFTFP && irt_isnum(ir->t)) break;
  1848.       ir->prev = (uint16_t)REGSP_HINT((rload & 15));
  1849.       rload = lj_ror(rload, 4);
  1850.       continue;
  1851. #endif
  1852.     case IR_CALLXS: {
  1853.       CCallInfo ci;
  1854.       ci.flags = asm_callx_flags(as, ir);
  1855.       ir->prev = asm_setup_call_slots(as, ir, &ci);
  1856.       if (inloop)
  1857.         as->modset |= RSET_SCRATCH;
  1858.       continue;
  1859.       }
  1860.     case IR_CALLN: case IR_CALLA: case IR_CALLL: case IR_CALLS: {
  1861.       const CCallInfo *ci = &lj_ir_callinfo[ir->op2];
  1862.       ir->prev = asm_setup_call_slots(as, ir, ci);
  1863.       if (inloop)
  1864.         as->modset |= (ci->flags & CCI_NOFPRCLOBBER) ?
  1865.                       (RSET_SCRATCH & ~RSET_FPR) : RSET_SCRATCH;
  1866.       continue;
  1867.       }
  1868. #if LJ_SOFTFP || (LJ_32 && LJ_HASFFI)
  1869.     case IR_HIOP:
  1870.       switch ((ir-1)->o) {
  1871. #if LJ_SOFTFP && LJ_TARGET_ARM
  1872.       case IR_SLOAD: case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  1873.         if (ra_hashint((ir-1)->r)) {
  1874.           ir->prev = (ir-1)->prev + 1;
  1875.           continue;
  1876.         }
  1877.         break;
  1878. #endif
  1879. #if !LJ_SOFTFP && LJ_NEED_FP64
  1880.       case IR_CONV:
  1881.         if (irt_isfp((ir-1)->t)) {
  1882.           ir->prev = REGSP_HINT(RID_FPRET);
  1883.           continue;
  1884.         }
  1885.         /* fallthrough */
  1886. #endif
  1887.       case IR_CALLN: case IR_CALLXS:
  1888. #if LJ_SOFTFP
  1889.       case IR_MIN: case IR_MAX:
  1890. #endif
  1891.         (ir-1)->prev = REGSP_HINT(RID_RETLO);
  1892.         ir->prev = REGSP_HINT(RID_RETHI);
  1893.         continue;
  1894.       default:
  1895.         break;
  1896.       }
  1897.       break;
  1898. #endif
  1899. #if LJ_SOFTFP
  1900.     case IR_MIN: case IR_MAX:
  1901.       if ((ir+1)->o != IR_HIOP) break;
  1902.       /* fallthrough */
  1903. #endif
  1904.     /* C calls evict all scratch regs and return results in RID_RET. */
  1905.     case IR_SNEW: case IR_XSNEW: case IR_NEWREF: case IR_BUFPUT:
  1906.       if (REGARG_NUMGPR < 3 && as->evenspill < 3)
  1907.         as->evenspill = 3;  /* lj_str_new and lj_tab_newkey need 3 args. */
  1908. #if LJ_TARGET_X86 && LJ_HASFFI
  1909.       if (0) {
  1910.     case IR_CNEW:
  1911.         if (ir->op2 != REF_NIL && as->evenspill < 4)
  1912.           as->evenspill = 4;  /* lj_cdata_newv needs 4 args. */
  1913.       }
  1914. #else
  1915.     case IR_CNEW:
  1916. #endif
  1917.     case IR_TNEW: case IR_TDUP: case IR_CNEWI: case IR_TOSTR:
  1918.     case IR_BUFSTR:
  1919.       ir->prev = REGSP_HINT(RID_RET);
  1920.       if (inloop)
  1921.         as->modset = RSET_SCRATCH;
  1922.       continue;
  1923.     case IR_STRTO: case IR_OBAR:
  1924.       if (inloop)
  1925.         as->modset = RSET_SCRATCH;
  1926.       break;
  1927. #if !LJ_SOFTFP
  1928.     case IR_ATAN2:
  1929. #if LJ_TARGET_X86
  1930.       if (as->evenspill < 4)  /* Leave room to call atan2(). */
  1931.         as->evenspill = 4;
  1932. #endif
  1933. #if !LJ_TARGET_X86ORX64
  1934.     case IR_LDEXP:
  1935. #endif
  1936. #endif
  1937.     case IR_POW:
  1938.       if (!LJ_SOFTFP && irt_isnum(ir->t)) {
  1939.         if (inloop)
  1940.           as->modset |= RSET_SCRATCH;
  1941. #if LJ_TARGET_X86
  1942.         break;
  1943. #else
  1944.         ir->prev = REGSP_HINT(RID_FPRET);
  1945.         continue;
  1946. #endif
  1947.       }
  1948.       /* fallthrough for integer POW */
  1949.     case IR_DIV: case IR_MOD:
  1950.       if (!irt_isnum(ir->t)) {
  1951.         ir->prev = REGSP_HINT(RID_RET);
  1952.         if (inloop)
  1953.           as->modset |= (RSET_SCRATCH & RSET_GPR);
  1954.         continue;
  1955.       }
  1956.       break;
  1957.     case IR_FPMATH:
  1958. #if LJ_TARGET_X86ORX64
  1959.       if (ir->op2 <= IRFPM_TRUNC) {
  1960.         if (!(as->flags & JIT_F_SSE4_1)) {
  1961.           ir->prev = REGSP_HINT(RID_XMM0);
  1962.           if (inloop)
  1963.             as->modset |= RSET_RANGE(RID_XMM0, RID_XMM3+1)|RID2RSET(RID_EAX);
  1964.           continue;
  1965.         }
  1966.         break;
  1967.       } else if (ir->op2 == IRFPM_EXP2 && !LJ_64) {
  1968.         if (as->evenspill < 4)  /* Leave room to call pow(). */
  1969.           as->evenspill = 4;
  1970.       }
  1971. #endif
  1972.       if (inloop)
  1973.         as->modset |= RSET_SCRATCH;
  1974. #if LJ_TARGET_X86
  1975.       break;
  1976. #else
  1977.       ir->prev = REGSP_HINT(RID_FPRET);
  1978.       continue;
  1979. #endif
  1980. #if LJ_TARGET_X86ORX64
  1981.     /* Non-constant shift counts need to be in RID_ECX on x86/x64. */
  1982.     case IR_BSHL: case IR_BSHR: case IR_BSAR: case IR_BROL: case IR_BROR:
  1983.       if (!irref_isk(ir->op2) && !ra_hashint(IR(ir->op2)->r)) {
  1984.         IR(ir->op2)->r = REGSP_HINT(RID_ECX);
  1985.         if (inloop)
  1986.           rset_set(as->modset, RID_ECX);
  1987.       }
  1988.       break;
  1989. #endif
  1990.     /* Do not propagate hints across type conversions or loads. */
  1991.     case IR_TOBIT:
  1992.     case IR_XLOAD:
  1993. #if !LJ_TARGET_ARM
  1994.     case IR_ALOAD: case IR_HLOAD: case IR_ULOAD: case IR_VLOAD:
  1995. #endif
  1996.       break;
  1997.     case IR_CONV:
  1998.       if (irt_isfp(ir->t) || (ir->op2 & IRCONV_SRCMASK) == IRT_NUM ||
  1999.           (ir->op2 & IRCONV_SRCMASK) == IRT_FLOAT)
  2000.         break;
  2001.       /* fallthrough */
  2002.     default:
  2003.       /* Propagate hints across likely 'op reg, imm' or 'op reg'. */
  2004.       if (irref_isk(ir->op2) && !irref_isk(ir->op1) &&
  2005.           ra_hashint(regsp_reg(IR(ir->op1)->prev))) {
  2006.         ir->prev = IR(ir->op1)->prev;
  2007.         continue;
  2008.       }
  2009.       break;
  2010.     }
  2011.     ir->prev = REGSP_INIT;
  2012.   }
  2013.   if ((as->evenspill & 1))
  2014.     as->oddspill = as->evenspill++;
  2015.   else
  2016.     as->oddspill = 0;
  2017. }
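
(Editor's aside, not part of lj_asm.c: on ARM the loads in asm_setup_regsp() draw their register hints from the packed constant 0xa6402a64. Each 4-bit nibble is a register number; `rload & 15` yields the current hint and `lj_ror(rload, 4)` rotates the next nibble into place, giving a cheap round-robin over a fixed register sequence. A standalone sketch; ror32() is a stand-in for lj_ror():)

#include <stdio.h>
#include <stdint.h>

static uint32_t ror32(uint32_t x, unsigned n)  /* Rotate right by n (0 < n < 32). */
{
  return (x >> n) | (x << (32 - n));
}

int main(void)
{
  uint32_t rload = 0xa6402a64u;  /* Same packed constant as in asm_setup_regsp(). */
  int i;
  for (i = 0; i < 8; i++) {
    printf("hint %d: r%u\n", i, (unsigned)(rload & 15));  /* 4, 6, 10, 2, 0, 4, 6, 10 */
    rload = ror32(rload, 4);
  }
  return 0;
}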

  2018. /* -- Assembler core ------------------------------------------------------ */

  2019. /* Assemble a trace. */
  2020. void lj_asm_trace(jit_State *J, GCtrace *T)
  2021. {
  2022.   ASMState as_;
  2023.   ASMState *as = &as_;
  2024.   MCode *origtop;

  2025.   /* Ensure an initialized instruction beyond the last one for HIOP checks. */
  2026.   J->cur.nins = lj_ir_nextins(J);
  2027.   J->cur.ir[J->cur.nins].o = IR_NOP;

  2028.   /* Setup initial state. Copy some fields to reduce indirections. */
  2029.   as->J = J;
  2030.   as->T = T;
  2031.   as->ir = T->ir;
  2032.   as->flags = J->flags;
  2033.   as->loopref = J->loopref;
  2034.   as->realign = NULL;
  2035.   as->loopinv = 0;
  2036.   as->parent = J->parent ? traceref(J, J->parent) : NULL;

  2037.   /* Reserve MCode memory. */
  2038.   as->mctop = origtop = lj_mcode_reserve(J, &as->mcbot);
  2039.   as->mcp = as->mctop;
  2040.   as->mclim = as->mcbot + MCLIM_REDZONE;
  2041.   asm_setup_target(as);

  2042.   do {
  2043.     as->mcp = as->mctop;
  2044. #ifdef LUA_USE_ASSERT
  2045.     as->mcp_prev = as->mcp;
  2046. #endif
  2047.     as->curins = T->nins;
  2048.     RA_DBG_START();
  2049.     RA_DBGX((as, "===== STOP ====="));

  2050.     /* General trace setup. Emit tail of trace. */
  2051.     asm_tail_prep(as);
  2052.     as->mcloop = NULL;
  2053.     as->flagmcp = NULL;
  2054.     as->topslot = 0;
  2055.     as->gcsteps = 0;
  2056.     as->sectref = as->loopref;
  2057.     as->fuseref = (as->flags & JIT_F_OPT_FUSE) ? as->loopref : FUSE_DISABLED;
  2058.     asm_setup_regsp(as);
  2059.     if (!as->loopref)
  2060.       asm_tail_link(as);

  2061.     /* Assemble a trace in linear backwards order. */
  2062.     for (as->curins--; as->curins > as->stopins; as->curins--) {
  2063.       IRIns *ir = IR(as->curins);
  2064.       lua_assert(!(LJ_32 && irt_isint64(ir->t)));  /* Handled by SPLIT. */
  2065.       if (!ra_used(ir) && !ir_sideeff(ir) && (as->flags & JIT_F_OPT_DCE))
  2066.       continue;  /* Dead-code elimination can be soooo easy. */
  2067.       if (irt_isguard(ir->t))
  2068.         asm_snap_prep(as);
  2069.       RA_DBG_REF();
  2070.       checkmclim(as);
  2071.       asm_ir(as, ir);
  2072.     }
  2073.   } while (as->realign);  /* Retry in case the MCode needs to be realigned. */

  2074.   /* Emit head of trace. */
  2075.   RA_DBG_REF();
  2076.   checkmclim(as);
  2077.   if (as->gcsteps > 0) {
  2078.     as->curins = as->T->snap[0].ref;
  2079.     asm_snap_prep(as);  /* The GC check is a guard. */
  2080.     asm_gc_check(as);
  2081.   }
  2082.   ra_evictk(as);
  2083.   if (as->parent)
  2084.     asm_head_side(as);
  2085.   else
  2086.     asm_head_root(as);
  2087.   asm_phi_fixup(as);

  2088.   RA_DBGX((as, "===== START ===="));
  2089.   RA_DBG_FLUSH();
  2090.   if (as->freeset != RSET_ALL)
  2091.     lj_trace_err(as->J, LJ_TRERR_BADRA);  /* Ouch! Should never happen. */

  2092.   /* Set trace entry point before fixing up tail to allow link to self. */
  2093.   T->mcode = as->mcp;
  2094.   T->mcloop = as->mcloop ? (MSize)((char *)as->mcloop - (char *)as->mcp) : 0;
  2095.   if (!as->loopref)
  2096.     asm_tail_fixup(as, T->link);  /* Note: this may change as->mctop! */
  2097.   T->szmcode = (MSize)((char *)as->mctop - (char *)as->mcp);
  2098.   lj_mcode_sync(T->mcode, origtop);
  2099. }
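
(Editor's aside, not part of lj_asm.c: lj_asm_trace() walks the IR in linear backwards order because machine code is emitted into a buffer that grows downwards: as->mcp starts at as->mctop and each emitted instruction decrements it, so the logically last instruction is written first and ends up at the highest address, while the trace entry point is wherever as->mcp stops. A standalone sketch of the scheme using byte-sized stand-in "instructions":)

#include <stdio.h>
#include <stdint.h>

typedef uint8_t MCode;

int main(void)
{
  static MCode buf[64];
  MCode *mctop = buf + sizeof(buf);     /* Top of the reserved code area. */
  MCode *mcp = mctop;                   /* Emission pointer, grows down. */
  MCode insns[3] = {0x01, 0x02, 0x03};  /* Logical (execution) order. */
  int i;

  /* Emit in reverse logical order; memory ends up in forward order. */
  for (i = 2; i >= 0; i--)
    *--mcp = insns[i];

  printf("entry at offset %d: %02x %02x %02x\n",
         (int)(mcp - buf), mcp[0], mcp[1], mcp[2]);  /* Prints: 01 02 03 */
  return 0;
}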

  2100. #undef IR

  2101. #endif