src/lj_mcode.c - luajit-2.0-src

Data types defined

Functions defined

Macros defined

Source code

  1. /*
  2. ** Machine code management.
  3. ** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
  4. */

  5. #define lj_mcode_c
  6. #define LUA_CORE

  7. #include "lj_obj.h"
  8. #if LJ_HASJIT
  9. #include "lj_gc.h"
  10. #include "lj_err.h"
  11. #include "lj_jit.h"
  12. #include "lj_mcode.h"
  13. #include "lj_trace.h"
  14. #include "lj_dispatch.h"
  15. #endif
  16. #if LJ_HASJIT || LJ_HASFFI
  17. #include "lj_vm.h"
  18. #endif

  19. /* -- OS-specific functions ----------------------------------------------- */

  20. #if LJ_HASJIT || LJ_HASFFI

  21. /* Define this if you want to run LuaJIT with Valgrind. */
  22. #ifdef LUAJIT_USE_VALGRIND
  23. #include <valgrind/valgrind.h>
  24. #endif

  25. #if LJ_TARGET_IOS
  26. void sys_icache_invalidate(void *start, size_t len);
  27. #endif

/* Synchronize data/instruction cache.
** Must be called after writing machine code, since many CPUs have
** incoherent instruction caches. [start, end) delimits the byte range.
*/
void lj_mcode_sync(void *start, void *end)
{
#ifdef LUAJIT_USE_VALGRIND
  /* Tell Valgrind to discard any cached translations for this range. */
  VALGRIND_DISCARD_TRANSLATIONS(start, (char *)end-(char *)start);
#endif
#if LJ_TARGET_X86ORX64
  UNUSED(start); UNUSED(end);  /* x86/x64 I-caches are coherent: no-op. */
#elif LJ_TARGET_IOS
  sys_icache_invalidate(start, (char *)end-(char *)start);  /* Darwin libc. */
#elif LJ_TARGET_PPC
  lj_vm_cachesync(start, end);  /* Hand-written sync in the VM assembler part. */
#elif defined(__GNUC__)
  __clear_cache(start, end);  /* GCC/Clang builtin cache flush. */
#else
#error "Missing builtin to flush instruction cache"
#endif
}

  46. #endif

  47. #if LJ_HASJIT

  48. #if LJ_TARGET_WINDOWS

  49. #define WIN32_LEAN_AND_MEAN
  50. #include <windows.h>

  51. #define MCPROT_RW        PAGE_READWRITE
  52. #define MCPROT_RX        PAGE_EXECUTE_READ
  53. #define MCPROT_RWX        PAGE_EXECUTE_READWRITE

  54. static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, DWORD prot)
  55. {
  56.   void *p = VirtualAlloc((void *)hint, sz,
  57.                          MEM_RESERVE|MEM_COMMIT|MEM_TOP_DOWN, prot);
  58.   if (!p && !hint)
  59.     lj_trace_err(J, LJ_TRERR_MCODEAL);
  60.   return p;
  61. }

  62. static void mcode_free(jit_State *J, void *p, size_t sz)
  63. {
  64.   UNUSED(J); UNUSED(sz);
  65.   VirtualFree(p, 0, MEM_RELEASE);
  66. }

  67. static int mcode_setprot(void *p, size_t sz, DWORD prot)
  68. {
  69.   DWORD oprot;
  70.   return !VirtualProtect(p, sz, prot, &oprot);
  71. }

  72. #elif LJ_TARGET_POSIX

  73. #include <sys/mman.h>

  74. #ifndef MAP_ANONYMOUS
  75. #define MAP_ANONYMOUS        MAP_ANON
  76. #endif

  77. #define MCPROT_RW        (PROT_READ|PROT_WRITE)
  78. #define MCPROT_RX        (PROT_READ|PROT_EXEC)
  79. #define MCPROT_RWX        (PROT_READ|PROT_WRITE|PROT_EXEC)

  80. static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
  81. {
  82.   void *p = mmap((void *)hint, sz, prot, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
  83.   if (p == MAP_FAILED) {
  84.     if (!hint) lj_trace_err(J, LJ_TRERR_MCODEAL);
  85.     p = NULL;
  86.   }
  87.   return p;
  88. }

  89. static void mcode_free(jit_State *J, void *p, size_t sz)
  90. {
  91.   UNUSED(J);
  92.   munmap(p, sz);
  93. }

  94. static int mcode_setprot(void *p, size_t sz, int prot)
  95. {
  96.   return mprotect(p, sz, prot);
  97. }

  98. #elif LJ_64

  99. #error "Missing OS support for explicit placement of executable memory"

  100. #else

  101. /* Fallback allocator. This will fail if memory is not executable by default. */
  102. #define LUAJIT_UNPROTECT_MCODE
  103. #define MCPROT_RW        0
  104. #define MCPROT_RX        0
  105. #define MCPROT_RWX        0

  106. static void *mcode_alloc_at(jit_State *J, uintptr_t hint, size_t sz, int prot)
  107. {
  108.   UNUSED(hint); UNUSED(prot);
  109.   return lj_mem_new(J->L, sz);
  110. }

/* Return the block to the GC-accounted allocator (fallback mode). */
static void mcode_free(jit_State *J, void *p, size_t sz)
{
  lj_mem_free(J2G(J), p, sz);
}

  115. #endif

  116. /* -- MCode area protection ----------------------------------------------- */

  117. /* Define this ONLY if page protection twiddling becomes a bottleneck. */
  118. #ifdef LUAJIT_UNPROTECT_MCODE

  119. /* It's generally considered to be a potential security risk to have
  120. ** pages with simultaneous write *and* execute access in a process.
  121. **
  122. ** Do not even think about using this mode for server processes or
  123. ** apps handling untrusted external data (such as a browser).
  124. **
  125. ** The security risk is not in LuaJIT itself -- but if an adversary finds
  126. ** any *other* flaw in your C application logic, then any RWX memory page
  127. ** simplifies writing an exploit considerably.
  128. */
  129. #define MCPROT_GEN        MCPROT_RWX
  130. #define MCPROT_RUN        MCPROT_RWX

/* No-op: in RWX mode the pages keep full access for their whole lifetime. */
static void mcode_protect(jit_State *J, int prot)
{
  UNUSED(J); UNUSED(prot);
}

  135. #else

  136. /* This is the default behaviour and much safer:
  137. **
  138. ** Most of the time the memory pages holding machine code are executable,
  139. ** but NONE of them is writable.
  140. **
  141. ** The current memory area is marked read-write (but NOT executable) only
  142. ** during the short time window while the assembler generates machine code.
  143. */
  144. #define MCPROT_GEN        MCPROT_RW
  145. #define MCPROT_RUN        MCPROT_RX

  146. /* Protection twiddling failed. Probably due to kernel security. */
  147. static LJ_NOINLINE void mcode_protfail(jit_State *J)
  148. {
  149.   lua_CFunction panic = J2G(J)->panic;
  150.   if (panic) {
  151.     lua_State *L = J->L;
  152.     setstrV(L, L->top++, lj_err_str(L, LJ_ERR_JITPROT));
  153.     panic(L);
  154.   }
  155. }

  156. /* Change protection of MCode area. */
  157. static void mcode_protect(jit_State *J, int prot)
  158. {
  159.   if (J->mcprot != prot) {
  160.     if (LJ_UNLIKELY(mcode_setprot(J->mcarea, J->szmcarea, prot)))
  161.       mcode_protfail(J);
  162.     J->mcprot = prot;
  163.   }
  164. }

  165. #endif

  166. /* -- MCode area allocation ----------------------------------------------- */

  167. #if LJ_TARGET_X64
  168. #define mcode_validptr(p)        ((p) && (uintptr_t)(p) < (uintptr_t)1<<47)
  169. #else
  170. #define mcode_validptr(p)        ((p) && (uintptr_t)(p) < 0xffff0000)
  171. #endif

  172. #ifdef LJ_TARGET_JUMPRANGE

  173. /* Get memory within relative jump distance of our code in 64 bit mode. */
  174. static void *mcode_alloc(jit_State *J, size_t sz)
  175. {
  176.   /* Target an address in the static assembler code (64K aligned).
  177.   ** Try addresses within a distance of target-range/2+1MB..target+range/2-1MB.
  178.   ** Use half the jump range so every address in the range can reach any other.
  179.   */
  180. #if LJ_TARGET_MIPS
  181.   /* Use the middle of the 256MB-aligned region. */
  182.   uintptr_t target = ((uintptr_t)(void *)lj_vm_exit_handler & 0xf0000000u) +
  183.                      0x08000000u;
  184. #else
  185.   uintptr_t target = (uintptr_t)(void *)lj_vm_exit_handler & ~(uintptr_t)0xffff;
  186. #endif
  187.   const uintptr_t range = (1u << (LJ_TARGET_JUMPRANGE-1)) - (1u << 21);
  188.   /* First try a contiguous area below the last one. */
  189.   uintptr_t hint = J->mcarea ? (uintptr_t)J->mcarea - sz : 0;
  190.   int i;
  191.   for (i = 0; i < 32; i++) {  /* 32 attempts ought to be enough ... */
  192.     if (mcode_validptr(hint)) {
  193.       void *p = mcode_alloc_at(J, hint, sz, MCPROT_GEN);

  194.       if (mcode_validptr(p) &&
  195.           ((uintptr_t)p + sz - target < range || target - (uintptr_t)p < range))
  196.         return p;
  197.       if (p) mcode_free(J, p, sz);  /* Free badly placed area. */
  198.     }
  199.     /* Next try probing pseudo-random addresses. */
  200.     do {
  201.       hint = (0x78fb ^ LJ_PRNG_BITS(J, 15)) << 16/* 64K aligned. */
  202.     } while (!(hint + sz < range));
  203.     hint = target + hint - (range>>1);
  204.   }
  205.   lj_trace_err(J, LJ_TRERR_MCODEAL);  /* Give up. OS probably ignores hints? */
  206.   return NULL;
  207. }

  208. #else

/* All memory addresses are reachable by relative jumps. */
static void *mcode_alloc(jit_State *J, size_t sz)
{
#ifdef __OpenBSD__
  /* Allow better executable memory allocation for OpenBSD W^X mode:
  ** allocate with run protection first, then switch to the generation
  ** protection. If the kernel refuses the switch, release the area.
  */
  void *p = mcode_alloc_at(J, 0, sz, MCPROT_RUN);
  if (p && mcode_setprot(p, sz, MCPROT_GEN)) {
    mcode_free(J, p, sz);
    return NULL;
  }
  return p;
#else
  return mcode_alloc_at(J, 0, sz, MCPROT_GEN);
#endif
}

  224. #endif

  225. /* -- MCode area management ----------------------------------------------- */

/* Linked list of MCode areas. Each area begins with this header, so the
** usable code space starts at area + sizeof(MCLink).
*/
typedef struct MCLink {
  MCode *next;                /* Next (older) area, or NULL for the last one. */
  size_t size;                /* Size of current area, including this header. */
} MCLink;

  231. /* Allocate a new MCode area. */
  232. static void mcode_allocarea(jit_State *J)
  233. {
  234.   MCode *oldarea = J->mcarea;
  235.   size_t sz = (size_t)J->param[JIT_P_sizemcode] << 10;
  236.   sz = (sz + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  237.   J->mcarea = (MCode *)mcode_alloc(J, sz);
  238.   J->szmcarea = sz;
  239.   J->mcprot = MCPROT_GEN;
  240.   J->mctop = (MCode *)((char *)J->mcarea + J->szmcarea);
  241.   J->mcbot = (MCode *)((char *)J->mcarea + sizeof(MCLink));
  242.   ((MCLink *)J->mcarea)->next = oldarea;
  243.   ((MCLink *)J->mcarea)->size = sz;
  244.   J->szallmcarea += sz;
  245. }

  246. /* Free all MCode areas. */
  247. void lj_mcode_free(jit_State *J)
  248. {
  249.   MCode *mc = J->mcarea;
  250.   J->mcarea = NULL;
  251.   J->szallmcarea = 0;
  252.   while (mc) {
  253.     MCode *next = ((MCLink *)mc)->next;
  254.     mcode_free(J, mc, ((MCLink *)mc)->size);
  255.     mc = next;
  256.   }
  257. }

  258. /* -- MCode transactions -------------------------------------------------- */

  259. /* Reserve the remainder of the current MCode area. */
  260. MCode *lj_mcode_reserve(jit_State *J, MCode **lim)
  261. {
  262.   if (!J->mcarea)
  263.     mcode_allocarea(J);
  264.   else
  265.     mcode_protect(J, MCPROT_GEN);
  266.   *lim = J->mcbot;
  267.   return J->mctop;
  268. }

/* Commit the top part of the current MCode area and restore run protection. */
void lj_mcode_commit(jit_State *J, MCode *top)
{
  J->mctop = top;
  mcode_protect(J, MCPROT_RUN);
}

/* Abort the reservation: just flip the area back to run protection. */
void lj_mcode_abort(jit_State *J)
{
  if (J->mcarea)
    mcode_protect(J, MCPROT_RUN);
}

/* Set/reset protection to allow patching of MCode areas.
** finish == 0: make the area containing ptr writable; returns the area base,
** which the caller passes back later as ptr with finish != 0.
** finish != 0: restore run protection for that area; returns NULL.
*/
MCode *lj_mcode_patch(jit_State *J, MCode *ptr, int finish)
{
#ifdef LUAJIT_UNPROTECT_MCODE
  UNUSED(J); UNUSED(ptr); UNUSED(finish);  /* Pages are always RWX here. */
  return NULL;
#else
  if (finish) {
    if (J->mcarea == ptr)
      mcode_protect(J, MCPROT_RUN);  /* Current area: go via protection cache. */
    else if (LJ_UNLIKELY(mcode_setprot(ptr, ((MCLink *)ptr)->size, MCPROT_RUN)))
      mcode_protfail(J);
    return NULL;
  } else {
    MCode *mc = J->mcarea;
    /* Try current area first to use the protection cache. */
    if (ptr >= mc && ptr < (MCode *)((char *)mc + J->szmcarea)) {
      mcode_protect(J, MCPROT_GEN);
      return mc;
    }
    /* Otherwise search through the list of MCode areas. */
    for (;;) {
      mc = ((MCLink *)mc)->next;
      lua_assert(mc != NULL);  /* ptr must lie inside some known area. */
      if (ptr >= mc && ptr < (MCode *)((char *)mc + ((MCLink *)mc)->size)) {
        if (LJ_UNLIKELY(mcode_setprot(mc, ((MCLink *)mc)->size, MCPROT_GEN)))
          mcode_protfail(J);
        return mc;
      }
    }
  }
#endif
}

  314. /* Limit of MCode reservation reached. */
  315. void lj_mcode_limiterr(jit_State *J, size_t need)
  316. {
  317.   size_t sizemcode, maxmcode;
  318.   lj_mcode_abort(J);
  319.   sizemcode = (size_t)J->param[JIT_P_sizemcode] << 10;
  320.   sizemcode = (sizemcode + LJ_PAGESIZE-1) & ~(size_t)(LJ_PAGESIZE - 1);
  321.   maxmcode = (size_t)J->param[JIT_P_maxmcode] << 10;
  322.   if ((size_t)need > sizemcode)
  323.     lj_trace_err(J, LJ_TRERR_MCODEOV);  /* Too long for any area. */
  324.   if (J->szallmcarea + sizemcode > maxmcode)
  325.     lj_trace_err(J, LJ_TRERR_MCODEAL);
  326.   mcode_allocarea(J);
  327.   lj_trace_err(J, LJ_TRERR_MCODELM);  /* Retry with new area. */
  328. }

  329. #endif