src/lj_gc.c - luajit-2.0-src

Global variables defined

Data types defined

Functions defined

Macros defined

Source code

  1. /*
  2. ** Garbage collector.
  3. ** Copyright (C) 2005-2015 Mike Pall. See Copyright Notice in luajit.h
  4. **
  5. ** Major portions taken verbatim or adapted from the Lua interpreter.
  6. ** Copyright (C) 1994-2008 Lua.org, PUC-Rio. See Copyright Notice in lua.h
  7. */

  8. #define lj_gc_c
  9. #define LUA_CORE

  10. #include "lj_obj.h"
  11. #include "lj_gc.h"
  12. #include "lj_err.h"
  13. #include "lj_buf.h"
  14. #include "lj_str.h"
  15. #include "lj_tab.h"
  16. #include "lj_func.h"
  17. #include "lj_udata.h"
  18. #include "lj_meta.h"
  19. #include "lj_state.h"
  20. #include "lj_frame.h"
  21. #if LJ_HASFFI
  22. #include "lj_ctype.h"
  23. #include "lj_cdata.h"
  24. #endif
  25. #include "lj_trace.h"
  26. #include "lj_vm.h"

/* Incremental GC tuning constants. */
#define GCSTEPSIZE        1024u        /* Basic step/debt granularity in bytes. */
#define GCSWEEPMAX        40        /* Max. objects swept per GCSsweep step. */
#define GCSWEEPCOST        10        /* Cost estimate charged per sweep step. */
#define GCFINALIZECOST        100        /* Cost estimate per finalizer call. */

/* Macros to set GCobj colors and flags. */
#define white2gray(x)                ((x)->gch.marked &= (uint8_t)~LJ_GC_WHITES)
#define gray2black(x)                ((x)->gch.marked |= LJ_GC_BLACK)
#define isfinalized(u)                ((u)->marked & LJ_GC_FINALIZED)

/* -- Mark phase ---------------------------------------------------------- */

/* Mark a TValue (if needed). Asserts that the tag matches the object type. */
#define gc_marktv(g, tv) \
  { lua_assert(!tvisgcv(tv) || (~itype(tv) == gcval(tv)->gch.gct)); \
    if (tviswhite(tv)) gc_mark(g, gcV(tv)); }

/* Mark a GCobj (if needed). */
#define gc_markobj(g, o) \
  { if (iswhite(obj2gco(o))) gc_mark(g, obj2gco(o)); }

/* Mark a string object. Strings need no traversal: clearing white suffices. */
#define gc_mark_str(s)                ((s)->marked &= (uint8_t)~LJ_GC_WHITES)

  45. /* Mark a white GCobj. */
/* Mark a white GCobj: turn it gray and handle type-specific marking. */
static void gc_mark(global_State *g, GCobj *o)
{
  int gct = o->gch.gct;
  lua_assert(iswhite(o) && !isdead(g, o));
  white2gray(o);
  if (LJ_UNLIKELY(gct == ~LJ_TUDATA)) {
    GCtab *mt = tabref(gco2ud(o)->metatable);
    gray2black(o);  /* Userdata are never gray. */
    if (mt) gc_markobj(g, mt);
    gc_markobj(g, tabref(gco2ud(o)->env));
  } else if (LJ_UNLIKELY(gct == ~LJ_TUPVAL)) {
    GCupval *uv = gco2uv(o);
    gc_marktv(g, uvval(uv));  /* Mark the upvalue's value. */
    if (uv->closed)
      gray2black(o);  /* Closed upvalues are never gray. */
  } else if (gct != ~LJ_TSTR && gct != ~LJ_TCDATA) {
    /* Strings/cdata hold no references; all other types need traversal, */
    /* so chain them onto the gray list for propagatemark(). */
    lua_assert(gct == ~LJ_TFUNC || gct == ~LJ_TTAB ||
               gct == ~LJ_TTHREAD || gct == ~LJ_TPROTO);
    setgcrefr(o->gch.gclist, g->gc.gray);
    setgcref(g->gc.gray, o);
  }
}

  68. /* Mark GC roots. */
  69. static void gc_mark_gcroot(global_State *g)
  70. {
  71.   ptrdiff_t i;
  72.   for (i = 0; i < GCROOT_MAX; i++)
  73.     if (gcref(g->gcroot[i]) != NULL)
  74.       gc_markobj(g, gcref(g->gcroot[i]));
  75. }

  76. /* Start a GC cycle and mark the root set. */
/* Start a GC cycle and mark the root set. */
static void gc_mark_start(global_State *g)
{
  /* Reset all propagation lists left over from any previous cycle. */
  setgcrefnull(g->gc.gray);
  setgcrefnull(g->gc.grayagain);
  setgcrefnull(g->gc.weak);
  /* Roots: main thread, its environment, the registry and gcroot[]. */
  gc_markobj(g, mainthread(g));
  gc_markobj(g, tabref(mainthread(g)->env));
  gc_marktv(g, &g->registrytv);
  gc_mark_gcroot(g);
  g->gc.state = GCSpropagate;  /* Next phase: propagate gray objects. */
}

  88. /* Mark open upvalues. */
/* Mark the values of open upvalues. */
static void gc_mark_uv(global_State *g)
{
  GCupval *uv;
  /* Walk the doubly-linked list of open upvalues anchored at g->uvhead. */
  for (uv = uvnext(&g->uvhead); uv != &g->uvhead; uv = uvnext(uv)) {
    lua_assert(uvprev(uvnext(uv)) == uv && uvnext(uvprev(uv)) == uv);
    if (isgray(obj2gco(uv)))  /* Only revisit upvalues already marked gray. */
      gc_marktv(g, uvval(uv));
  }
}

  98. /* Mark userdata in mmudata list. */
/* Mark userdata in mmudata list (objects pending finalization). */
static void gc_mark_mmudata(global_State *g)
{
  GCobj *root = gcref(g->gc.mmudata);
  GCobj *u = root;
  if (u) {
    /* The mmudata list is circular; iterate until back at the root. */
    do {
      u = gcnext(u);
      makewhite(g, u);  /* Could be from previous GC. */
      gc_mark(g, u);
    } while (u != root);
  }
}

  111. /* Separate userdata objects to be finalized to mmudata list. */
  112. size_t lj_gc_separateudata(global_State *g, int all)
  113. {
  114.   size_t m = 0;
  115.   GCRef *p = &mainthread(g)->nextgc;
  116.   GCobj *o;
  117.   while ((o = gcref(*p)) != NULL) {
  118.     if (!(iswhite(o) || all) || isfinalized(gco2ud(o))) {
  119.       p = &o->gch.nextgc;  /* Nothing to do. */
  120.     } else if (!lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc)) {
  121.       markfinalized(o);  /* Done, as there's no __gc metamethod. */
  122.       p = &o->gch.nextgc;
  123.     } else/* Otherwise move userdata to be finalized to mmudata list. */
  124.       m += sizeudata(gco2ud(o));
  125.       markfinalized(o);
  126.       *p = o->gch.nextgc;
  127.       if (gcref(g->gc.mmudata)) {  /* Link to end of mmudata list. */
  128.         GCobj *root = gcref(g->gc.mmudata);
  129.         setgcrefr(o->gch.nextgc, root->gch.nextgc);
  130.         setgcref(root->gch.nextgc, o);
  131.         setgcref(g->gc.mmudata, o);
  132.       } else/* Create circular list. */
  133.         setgcref(o->gch.nextgc, o);
  134.         setgcref(g->gc.mmudata, o);
  135.       }
  136.     }
  137.   }
  138.   return m;
  139. }

  140. /* -- Propagation phase --------------------------------------------------- */

  141. /* Traverse a table. */
  142. static int gc_traverse_tab(global_State *g, GCtab *t)
  143. {
  144.   int weak = 0;
  145.   cTValue *mode;
  146.   GCtab *mt = tabref(t->metatable);
  147.   if (mt)
  148.     gc_markobj(g, mt);
  149.   mode = lj_meta_fastg(g, mt, MM_mode);
  150.   if (mode && tvisstr(mode)) {  /* Valid __mode field? */
  151.     const char *modestr = strVdata(mode);
  152.     int c;
  153.     while ((c = *modestr++)) {
  154.       if (c == 'k') weak |= LJ_GC_WEAKKEY;
  155.       else if (c == 'v') weak |= LJ_GC_WEAKVAL;
  156.       else if (c == 'K') weak = (int)(~0u & ~LJ_GC_WEAKVAL);
  157.     }
  158.     if (weak > 0) {  /* Weak tables are cleared in the atomic phase. */
  159.       t->marked = (uint8_t)((t->marked & ~LJ_GC_WEAK) | weak);
  160.       setgcrefr(t->gclist, g->gc.weak);
  161.       setgcref(g->gc.weak, obj2gco(t));
  162.     }
  163.   }
  164.   if (weak == LJ_GC_WEAK/* Nothing to mark if both keys/values are weak. */
  165.     return 1;
  166.   if (!(weak & LJ_GC_WEAKVAL)) {  /* Mark array part. */
  167.     MSize i, asize = t->asize;
  168.     for (i = 0; i < asize; i++)
  169.       gc_marktv(g, arrayslot(t, i));
  170.   }
  171.   if (t->hmask > 0) {  /* Mark hash part. */
  172.     Node *node = noderef(t->node);
  173.     MSize i, hmask = t->hmask;
  174.     for (i = 0; i <= hmask; i++) {
  175.       Node *n = &node[i];
  176.       if (!tvisnil(&n->val)) {  /* Mark non-empty slot. */
  177.         lua_assert(!tvisnil(&n->key));
  178.         if (!(weak & LJ_GC_WEAKKEY)) gc_marktv(g, &n->key);
  179.         if (!(weak & LJ_GC_WEAKVAL)) gc_marktv(g, &n->val);
  180.       }
  181.     }
  182.   }
  183.   return weak;
  184. }

  185. /* Traverse a function. */
/* Traverse a function: environment, prototype and upvalues. */
static void gc_traverse_func(global_State *g, GCfunc *fn)
{
  gc_markobj(g, tabref(fn->c.env));  /* Mark the environment table. */
  if (isluafunc(fn)) {
    uint32_t i;
    lua_assert(fn->l.nupvalues <= funcproto(fn)->sizeuv);
    gc_markobj(g, funcproto(fn));  /* Mark the prototype. */
    for (i = 0; i < fn->l.nupvalues; i++)  /* Mark Lua function upvalues. */
      gc_markobj(g, &gcref(fn->l.uvptr[i])->uv);
  } else {
    uint32_t i;
    for (i = 0; i < fn->c.nupvalues; i++)  /* Mark C function upvalues. */
      gc_marktv(g, &fn->c.upvalue[i]);
  }
}

  201. #if LJ_HASJIT
  202. /* Mark a trace. */
/* Mark a trace by number: turn it gray and queue it for traversal. */
static void gc_marktrace(global_State *g, TraceNo traceno)
{
  GCobj *o = obj2gco(traceref(G2J(g), traceno));
  lua_assert(traceno != G2J(g)->cur.traceno);
  if (iswhite(o)) {
    white2gray(o);
    setgcrefr(o->gch.gclist, g->gc.gray);  /* Add to gray list. */
    setgcref(g->gc.gray, o);
  }
}

  213. /* Traverse a trace. */
/* Traverse a trace: IR GC constants, linked traces and start prototype. */
static void gc_traverse_trace(global_State *g, GCtrace *T)
{
  IRRef ref;
  if (T->traceno == 0) return;  /* Ignore the placeholder trace. */
  for (ref = T->nk; ref < REF_TRUE; ref++) {  /* Scan IR constant slots. */
    IRIns *ir = &T->ir[ref];
    if (ir->o == IR_KGC)
      gc_markobj(g, ir_kgc(ir));  /* Mark GC objects referenced by IR. */
  }
  /* Mark linked traces and the prototype the trace started from. */
  if (T->link) gc_marktrace(g, T->link);
  if (T->nextroot) gc_marktrace(g, T->nextroot);
  if (T->nextside) gc_marktrace(g, T->nextside);
  gc_markobj(g, gcref(T->startpt));
}

  228. /* The current trace is a GC root while not anchored in the prototype (yet). */
  229. #define gc_traverse_curtrace(g)        gc_traverse_trace(g, &G2J(g)->cur)
  230. #else
  231. #define gc_traverse_curtrace(g)        UNUSED(g)
  232. #endif

  233. /* Traverse a prototype. */
/* Traverse a prototype: chunk name, GC constants and anchored trace. */
static void gc_traverse_proto(global_State *g, GCproto *pt)
{
  ptrdiff_t i;
  gc_mark_str(proto_chunkname(pt));  /* Keep the chunk name string alive. */
  for (i = -(ptrdiff_t)pt->sizekgc; i < 0; i++)  /* Mark collectable consts. */
    gc_markobj(g, proto_kgc(pt, i));
#if LJ_HASJIT
  if (pt->trace) gc_marktrace(g, pt->trace);  /* Mark the root trace. */
#endif
}

  244. /* Traverse the frame structure of a stack. */
/* Traverse the frame structure of a stack.
** Walks frames from th->base downwards, marking frame functions when
** needed, and computes the minimum stack size the thread still needs.
*/
static MSize gc_traverse_frames(global_State *g, lua_State *th)
{
  TValue *frame, *top = th->top-1, *bot = tvref(th->stack);
  /* Note: extra vararg frame not skipped, marks function twice (harmless). */
  for (frame = th->base-1; frame > bot+LJ_FR2; frame = frame_prev(frame)) {
    GCfunc *fn = frame_func(frame);
    TValue *ftop = frame;
    if (isluafunc(fn)) ftop += funcproto(fn)->framesize;
    if (ftop > top) top = ftop;  /* Track the highest frame top seen. */
    if (!LJ_FR2) gc_markobj(g, fn);  /* Need to mark hidden function (or L). */
  }
  top++;  /* Correct bias of -1 (frame == base-1). */
  if (top > tvref(th->maxstack)) top = tvref(th->maxstack);
  return (MSize)(top - bot);  /* Return minimum needed stack size. */
}

  260. /* Traverse a thread object. */
/* Traverse a thread object: stack slots, environment and frames. */
static void gc_traverse_thread(global_State *g, lua_State *th)
{
  TValue *o, *top = th->top;
  for (o = tvref(th->stack)+1+LJ_FR2; o < top; o++)  /* Mark live slots. */
    gc_marktv(g, o);
  if (g->gc.state == GCSatomic) {
    top = tvref(th->stack) + th->stacksize;
    for (; o < top; o++)  /* Clear unmarked slots. */
      setnilV(o);
  }
  gc_markobj(g, tabref(th->env));  /* Mark the thread environment table. */
  lj_state_shrinkstack(th, gc_traverse_frames(g, th));
}

  274. /* Propagate one gray object. Traverse it and turn it black. */
/* Propagate one gray object. Traverse it and turn it black.
** Returns an estimate of the amount of memory traversed, used as the
** cost of this GC step.
*/
static size_t propagatemark(global_State *g)
{
  GCobj *o = gcref(g->gc.gray);
  int gct = o->gch.gct;
  lua_assert(isgray(o));
  gray2black(o);
  setgcrefr(g->gc.gray, o->gch.gclist);  /* Remove from gray list. */
  if (LJ_LIKELY(gct == ~LJ_TTAB)) {
    GCtab *t = gco2tab(o);
    if (gc_traverse_tab(g, t) > 0)
      black2gray(o);  /* Keep weak tables gray. */
    return sizeof(GCtab) + sizeof(TValue) * t->asize +
                           sizeof(Node) * (t->hmask + 1);
  } else if (LJ_LIKELY(gct == ~LJ_TFUNC)) {
    GCfunc *fn = gco2func(o);
    gc_traverse_func(g, fn);
    return isluafunc(fn) ? sizeLfunc((MSize)fn->l.nupvalues) :
                           sizeCfunc((MSize)fn->c.nupvalues);
  } else if (LJ_LIKELY(gct == ~LJ_TPROTO)) {
    GCproto *pt = gco2pt(o);
    gc_traverse_proto(g, pt);
    return pt->sizept;
  } else if (LJ_LIKELY(gct == ~LJ_TTHREAD)) {
    lua_State *th = gco2th(o);
    setgcrefr(th->gclist, g->gc.grayagain);  /* Rescan in atomic phase. */
    setgcref(g->gc.grayagain, o);
    black2gray(o);  /* Threads are never black. */
    gc_traverse_thread(g, th);
    return sizeof(lua_State) + sizeof(TValue) * th->stacksize;
  } else {
#if LJ_HASJIT
    GCtrace *T = gco2trace(o);
    gc_traverse_trace(g, T);
    return ((sizeof(GCtrace)+7)&~7) + (T->nins-T->nk)*sizeof(IRIns) +
           T->nsnap*sizeof(SnapShot) + T->nsnapmap*sizeof(SnapEntry);
#else
    lua_assert(0);
    return 0;
#endif
  }
}

  316. /* Propagate all gray objects. */
  317. static size_t gc_propagate_gray(global_State *g)
  318. {
  319.   size_t m = 0;
  320.   while (gcref(g->gc.gray) != NULL)
  321.     m += propagatemark(g);
  322.   return m;
  323. }

  324. /* -- Sweep phase --------------------------------------------------------- */

  325. /* Type of GC free functions. */
/* Type of GC free functions. */
typedef void (LJ_FASTCALL *GCFreeFunc)(global_State *g, GCobj *o);

/* GC free functions for LJ_TSTR .. LJ_TUDATA. ORDER LJ_T */
/* Indexed via o->gch.gct - ~LJ_TSTR in gc_sweep(). */
static const GCFreeFunc gc_freefunc[] = {
  (GCFreeFunc)lj_str_free,
  (GCFreeFunc)lj_func_freeuv,
  (GCFreeFunc)lj_state_free,
  (GCFreeFunc)lj_func_freeproto,
  (GCFreeFunc)lj_func_free,
#if LJ_HASJIT
  (GCFreeFunc)lj_trace_free,
#else
  (GCFreeFunc)0,  /* Traces cannot exist without the JIT. */
#endif
#if LJ_HASFFI
  (GCFreeFunc)lj_cdata_free,
#else
  (GCFreeFunc)0,  /* Cdata cannot exist without the FFI. */
#endif
  (GCFreeFunc)lj_tab_free,
  (GCFreeFunc)lj_udata_free
};

/* Full sweep of a GC list (no limit on the number of objects swept). */
#define gc_fullsweep(g, p)        gc_sweep(g, (p), ~(uint32_t)0)

  349. /* Partial sweep of a GC list. */
  350. static GCRef *gc_sweep(global_State *g, GCRef *p, uint32_t lim)
  351. {
  352.   /* Mask with other white and LJ_GC_FIXED. Or LJ_GC_SFIXED on shutdown. */
  353.   int ow = otherwhite(g);
  354.   GCobj *o;
  355.   while ((o = gcref(*p)) != NULL && lim-- > 0) {
  356.     if (o->gch.gct == ~LJ_TTHREAD/* Need to sweep open upvalues, too. */
  357.       gc_fullsweep(g, &gco2th(o)->openupval);
  358.     if (((o->gch.marked ^ LJ_GC_WHITES) & ow)) {  /* Black or current white? */
  359.       lua_assert(!isdead(g, o) || (o->gch.marked & LJ_GC_FIXED));
  360.       makewhite(g, o);  /* Value is alive, change to the current white. */
  361.       p = &o->gch.nextgc;
  362.     } else/* Otherwise value is dead, free it. */
  363.       lua_assert(isdead(g, o) || ow == LJ_GC_SFIXED);
  364.       setgcrefr(*p, o->gch.nextgc);
  365.       if (o == gcref(g->gc.root))
  366.         setgcrefr(g->gc.root, o->gch.nextgc);  /* Adjust list anchor. */
  367.       gc_freefunc[o->gch.gct - ~LJ_TSTR](g, o);
  368.     }
  369.   }
  370.   return p;
  371. }

  372. /* Check whether we can clear a key or a value slot from a table. */
  373. static int gc_mayclear(cTValue *o, int val)
  374. {
  375.   if (tvisgcv(o)) {  /* Only collectable objects can be weak references. */
  376.     if (tvisstr(o)) {  /* But strings cannot be used as weak references. */
  377.       gc_mark_str(strV(o));  /* And need to be marked. */
  378.       return 0;
  379.     }
  380.     if (iswhite(gcV(o)))
  381.       return 1/* Object is about to be collected. */
  382.     if (tvisudata(o) && val && isfinalized(udataV(o)))
  383.       return 1/* Finalized userdata is dropped only from values. */
  384.   }
  385.   return 0/* Cannot clear. */
  386. }

  387. /* Clear collected entries from weak tables. */
/* Clear collected entries from weak tables.
** Walks the list of weak tables chained through t->gclist.
*/
static void gc_clearweak(GCobj *o)
{
  while (o) {
    GCtab *t = gco2tab(o);
    lua_assert((t->marked & LJ_GC_WEAK));
    if ((t->marked & LJ_GC_WEAKVAL)) {
      MSize i, asize = t->asize;
      for (i = 0; i < asize; i++) {
        /* Clear array slot when value is about to be collected. */
        TValue *tv = arrayslot(t, i);
        if (gc_mayclear(tv, 1))
          setnilV(tv);
      }
    }
    if (t->hmask > 0) {
      Node *node = noderef(t->node);
      MSize i, hmask = t->hmask;
      for (i = 0; i <= hmask; i++) {
        Node *n = &node[i];
        /* Clear hash slot when key or value is about to be collected. */
        if (!tvisnil(&n->val) && (gc_mayclear(&n->key, 0) ||
                                  gc_mayclear(&n->val, 1)))
          setnilV(&n->val);
      }
    }
    o = gcref(t->gclist);  /* Next weak table in the list. */
  }
}

  416. /* Call a userdata or cdata finalizer. */
  417. static void gc_call_finalizer(global_State *g, lua_State *L,
  418.                               cTValue *mo, GCobj *o)
  419. {
  420.   /* Save and restore lots of state around the __gc callback. */
  421.   uint8_t oldh = hook_save(g);
  422.   GCSize oldt = g->gc.threshold;
  423.   int errcode;
  424.   TValue *top;
  425.   lj_trace_abort(g);
  426.   hook_entergc(g);  /* Disable hooks and new traces during __gc. */
  427.   g->gc.threshold = LJ_MAX_MEM/* Prevent GC steps. */
  428.   top = L->top;
  429.   copyTV(L, top++, mo);
  430.   if (LJ_FR2) setnilV(top++);
  431.   setgcV(L, top, o, ~o->gch.gct);
  432.   L->top = top+1;
  433.   errcode = lj_vm_pcall(L, top, 1+0, -1);  /* Stack: |mo|o| -> | */
  434.   hook_restore(g, oldh);
  435.   g->gc.threshold = oldt;  /* Restore GC threshold. */
  436.   if (errcode)
  437.     lj_err_throw(L, errcode);  /* Propagate errors. */
  438. }

  439. /* Finalize one userdata or cdata object from the mmudata list. */
/* Finalize one userdata or cdata object from the mmudata list. */
static void gc_finalize(lua_State *L)
{
  global_State *g = G(L);
  GCobj *o = gcnext(gcref(g->gc.mmudata));  /* Successor of the list root. */
  cTValue *mo;
  lua_assert(tvref(g->jit_base) == NULL);  /* Must not be called on trace. */
  /* Unchain from list of userdata to be finalized. */
  if (o == gcref(g->gc.mmudata))
    setgcrefnull(g->gc.mmudata);  /* Last element: list becomes empty. */
  else
    setgcrefr(gcref(g->gc.mmudata)->gch.nextgc, o->gch.nextgc);
#if LJ_HASFFI
  if (o->gch.gct == ~LJ_TCDATA) {
    TValue tmp, *tv;
    /* Add cdata back to the GC list and make it white. */
    setgcrefr(o->gch.nextgc, g->gc.root);
    setgcref(g->gc.root, o);
    makewhite(g, o);
    o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
    /* Resolve finalizer from the cdata finalizer table. */
    setcdataV(L, &tmp, gco2cd(o));
    tv = lj_tab_set(L, ctype_ctsG(g)->finalizer, &tmp);
    if (!tvisnil(tv)) {
      g->gc.nocdatafin = 0;
      copyTV(L, &tmp, tv);
      setnilV(tv);  /* Clear entry in finalizer table. */
      gc_call_finalizer(g, L, &tmp, o);
    }
    return;
  }
#endif
  /* Add userdata back to the main userdata list and make it white. */
  setgcrefr(o->gch.nextgc, mainthread(g)->nextgc);
  setgcref(mainthread(g)->nextgc, o);
  makewhite(g, o);
  /* Resolve the __gc metamethod. */
  mo = lj_meta_fastg(g, tabref(gco2ud(o)->metatable), MM_gc);
  if (mo)
    gc_call_finalizer(g, L, mo, o);
}

  480. /* Finalize all userdata objects from mmudata list. */
  481. void lj_gc_finalize_udata(lua_State *L)
  482. {
  483.   while (gcref(G(L)->gc.mmudata) != NULL)
  484.     gc_finalize(L);
  485. }

  486. #if LJ_HASFFI
  487. /* Finalize all cdata objects from finalizer table. */
/* Finalize all cdata objects from finalizer table (on shutdown). */
void lj_gc_finalize_cdata(lua_State *L)
{
  global_State *g = G(L);
  CTState *cts = ctype_ctsG(g);
  if (cts) {
    GCtab *t = cts->finalizer;
    Node *node = noderef(t->node);
    ptrdiff_t i;
    setgcrefnull(t->metatable);  /* Mark finalizer table as disabled. */
    for (i = (ptrdiff_t)t->hmask; i >= 0; i--)  /* Scan all hash slots. */
      if (!tvisnil(&node[i].val) && tviscdata(&node[i].key)) {
        GCobj *o = gcV(&node[i].key);
        TValue tmp;
        makewhite(g, o);
        o->gch.marked &= (uint8_t)~LJ_GC_CDATA_FIN;
        copyTV(L, &tmp, &node[i].val);
        setnilV(&node[i].val);  /* Clear entry before calling finalizer. */
        gc_call_finalizer(g, L, &tmp, o);
      }
  }
}

  510. /* Free all remaining GC objects. */
  511. void lj_gc_freeall(global_State *g)
  512. {
  513.   MSize i, strmask;
  514.   /* Free everything, except super-fixed objects (the main thread). */
  515.   g->gc.currentwhite = LJ_GC_WHITES | LJ_GC_SFIXED;
  516.   gc_fullsweep(g, &g->gc.root);
  517.   strmask = g->strmask;
  518.   for (i = 0; i <= strmask; i++)  /* Free all string hash chains. */
  519.     gc_fullsweep(g, &g->strhash[i]);
  520. }

  521. /* -- Collector ----------------------------------------------------------- */

  522. /* Atomic part of the GC cycle, transitioning from mark to sweep phase. */
/* Atomic part of the GC cycle, transitioning from mark to sweep phase.
** Runs to completion within a single GC step (see gc_onestep).
*/
static void atomic(global_State *g, lua_State *L)
{
  size_t udsize;

  gc_mark_uv(g);  /* Need to remark open upvalues (the thread may be dead). */
  gc_propagate_gray(g);  /* Propagate any left-overs. */

  setgcrefr(g->gc.gray, g->gc.weak);  /* Empty the list of weak tables. */
  setgcrefnull(g->gc.weak);
  lua_assert(!iswhite(obj2gco(mainthread(g))));
  gc_markobj(g, L);  /* Mark running thread. */
  gc_traverse_curtrace(g);  /* Traverse current trace. */
  gc_mark_gcroot(g);  /* Mark GC roots (again). */
  gc_propagate_gray(g);  /* Propagate all of the above. */

  setgcrefr(g->gc.gray, g->gc.grayagain);  /* Empty the 2nd chance list. */
  setgcrefnull(g->gc.grayagain);
  gc_propagate_gray(g);  /* Propagate it. */

  udsize = lj_gc_separateudata(g, 0);  /* Separate userdata to be finalized. */
  gc_mark_mmudata(g);  /* Mark them. */
  udsize += gc_propagate_gray(g);  /* And propagate the marks. */

  /* All marking done, clear weak tables. */
  gc_clearweak(gcref(g->gc.weak));

  lj_buf_shrink(L, &g->tmpbuf);  /* Shrink temp buffer. */

  /* Prepare for sweep phase. */
  g->gc.currentwhite = (uint8_t)otherwhite(g);  /* Flip current white. */
  g->strempty.marked = g->gc.currentwhite;
  setmref(g->gc.sweep, &g->gc.root);  /* Sweep starts at the root list. */
  g->gc.estimate = g->gc.total - (GCSize)udsize;  /* Initial estimate. */
}

  550. /* GC state machine. Returns a cost estimate for each step performed. */
  551. static size_t gc_onestep(lua_State *L)
  552. {
  553.   global_State *g = G(L);
  554.   switch (g->gc.state) {
  555.   case GCSpause:
  556.     gc_mark_start(g);  /* Start a new GC cycle by marking all GC roots. */
  557.     return 0;
  558.   case GCSpropagate:
  559.     if (gcref(g->gc.gray) != NULL)
  560.       return propagatemark(g);  /* Propagate one gray object. */
  561.     g->gc.state = GCSatomic;  /* End of mark phase. */
  562.     return 0;
  563.   case GCSatomic:
  564.     if (tvref(g->jit_base))  /* Don't run atomic phase on trace. */
  565.       return LJ_MAX_MEM;
  566.     atomic(g, L);
  567.     g->gc.state = GCSsweepstring;  /* Start of sweep phase. */
  568.     g->gc.sweepstr = 0;
  569.     return 0;
  570.   case GCSsweepstring: {
  571.     GCSize old = g->gc.total;
  572.     gc_fullsweep(g, &g->strhash[g->gc.sweepstr++]);  /* Sweep one chain. */
  573.     if (g->gc.sweepstr > g->strmask)
  574.       g->gc.state = GCSsweep;  /* All string hash chains sweeped. */
  575.     lua_assert(old >= g->gc.total);
  576.     g->gc.estimate -= old - g->gc.total;
  577.     return GCSWEEPCOST;
  578.     }
  579.   case GCSsweep: {
  580.     GCSize old = g->gc.total;
  581.     setmref(g->gc.sweep, gc_sweep(g, mref(g->gc.sweep, GCRef), GCSWEEPMAX));
  582.     lua_assert(old >= g->gc.total);
  583.     g->gc.estimate -= old - g->gc.total;
  584.     if (gcref(*mref(g->gc.sweep, GCRef)) == NULL) {
  585.       if (g->strnum <= (g->strmask >> 2) && g->strmask > LJ_MIN_STRTAB*2-1)
  586.         lj_str_resize(L, g->strmask >> 1);  /* Shrink string table. */
  587.       if (gcref(g->gc.mmudata)) {  /* Need any finalizations? */
  588.         g->gc.state = GCSfinalize;
  589. #if LJ_HASFFI
  590.         g->gc.nocdatafin = 1;
  591. #endif
  592.       } else/* Otherwise skip this phase to help the JIT. */
  593.         g->gc.state = GCSpause;  /* End of GC cycle. */
  594.         g->gc.debt = 0;
  595.       }
  596.     }
  597.     return GCSWEEPMAX*GCSWEEPCOST;
  598.     }
  599.   case GCSfinalize:
  600.     if (gcref(g->gc.mmudata) != NULL) {
  601.       if (tvref(g->jit_base))  /* Don't call finalizers on trace. */
  602.         return LJ_MAX_MEM;
  603.       gc_finalize(L);  /* Finalize one userdata object. */
  604.       if (g->gc.estimate > GCFINALIZECOST)
  605.         g->gc.estimate -= GCFINALIZECOST;
  606.       return GCFINALIZECOST;
  607.     }
  608. #if LJ_HASFFI
  609.     if (!g->gc.nocdatafin) lj_tab_rehash(L, ctype_ctsG(g)->finalizer);
  610. #endif
  611.     g->gc.state = GCSpause;  /* End of GC cycle. */
  612.     g->gc.debt = 0;
  613.     return 0;
  614.   default:
  615.     lua_assert(0);
  616.     return 0;
  617.   }
  618. }

  619. /* Perform a limited amount of incremental GC steps. */
  620. int LJ_FASTCALL lj_gc_step(lua_State *L)
  621. {
  622.   global_State *g = G(L);
  623.   GCSize lim;
  624.   int32_t ostate = g->vmstate;
  625.   setvmstate(g, GC);
  626.   lim = (GCSTEPSIZE/100) * g->gc.stepmul;
  627.   if (lim == 0)
  628.     lim = LJ_MAX_MEM;
  629.   if (g->gc.total > g->gc.threshold)
  630.     g->gc.debt += g->gc.total - g->gc.threshold;
  631.   do {
  632.     lim -= (GCSize)gc_onestep(L);
  633.     if (g->gc.state == GCSpause) {
  634.       g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  635.       g->vmstate = ostate;
  636.       return 1/* Finished a GC cycle. */
  637.     }
  638.   } while (sizeof(lim) == 8 ? ((int64_t)lim > 0) : ((int32_t)lim > 0));
  639.   if (g->gc.debt < GCSTEPSIZE) {
  640.     g->gc.threshold = g->gc.total + GCSTEPSIZE;
  641.     g->vmstate = ostate;
  642.     return -1;
  643.   } else {
  644.     g->gc.debt -= GCSTEPSIZE;
  645.     g->gc.threshold = g->gc.total;
  646.     g->vmstate = ostate;
  647.     return 0;
  648.   }
  649. }

  650. /* Ditto, but fix the stack top first. */
  651. void LJ_FASTCALL lj_gc_step_fixtop(lua_State *L)
  652. {
  653.   if (curr_funcisL(L)) L->top = curr_topL(L);
  654.   lj_gc_step(L);
  655. }

  656. #if LJ_HASJIT
  657. /* Perform multiple GC steps. Called from JIT-compiled code. */
/* Perform multiple GC steps. Called from JIT-compiled code. */
int LJ_FASTCALL lj_gc_step_jit(global_State *g, MSize steps)
{
  lua_State *L = gco2th(gcref(g->cur_L));  /* Currently running thread. */
  L->base = tvref(G(L)->jit_base);  /* Sync stack base from JIT state. */
  L->top = curr_topL(L);
  while (steps-- > 0 && lj_gc_step(L) == 0)
    ;
  /* Return 1 to force a trace exit. */
  return (G(L)->gc.state == GCSatomic || G(L)->gc.state == GCSfinalize);
}
  668. #endif

  669. /* Perform a full GC cycle. */
/* Perform a full GC cycle (finish the current one, then run a whole one). */
void lj_gc_fullgc(lua_State *L)
{
  global_State *g = G(L);
  int32_t ostate = g->vmstate;
  setvmstate(g, GC);
  if (g->gc.state <= GCSatomic) {  /* Caught somewhere in the middle. */
    setmref(g->gc.sweep, &g->gc.root);  /* Sweep everything (preserving it). */
    setgcrefnull(g->gc.gray);  /* Reset lists from partial propagation. */
    setgcrefnull(g->gc.grayagain);
    setgcrefnull(g->gc.weak);
    g->gc.state = GCSsweepstring;  /* Fast forward to the sweep phase. */
    g->gc.sweepstr = 0;
  }
  while (g->gc.state == GCSsweepstring || g->gc.state == GCSsweep)
    gc_onestep(L);  /* Finish sweep. */
  lua_assert(g->gc.state == GCSfinalize || g->gc.state == GCSpause);
  /* Now perform a full GC. */
  g->gc.state = GCSpause;
  do { gc_onestep(L); } while (g->gc.state != GCSpause);
  g->gc.threshold = (g->gc.estimate/100) * g->gc.pause;
  g->vmstate = ostate;  /* Restore previous VM state. */
}

  692. /* -- Write barriers ------------------------------------------------------ */

  693. /* Move the GC propagation frontier forward. */
/* Move the GC propagation frontier forward (black object o now refers to
** white object v).
*/
void lj_gc_barrierf(global_State *g, GCobj *o, GCobj *v)
{
  lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
  lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
  /* NOTE(review): tables are excluded here — presumably they use a
  ** different (backward) barrier elsewhere; confirm in lj_gc.h. */
  lua_assert(o->gch.gct != ~LJ_TTAB);
  /* Preserve invariant during propagation. Otherwise it doesn't matter. */
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, v);  /* Move frontier forward. */
  else
    makewhite(g, o);  /* Make it white to avoid the following barrier. */
}

  705. /* Specialized barrier for closed upvalue. Pass &uv->tv. */
/* Specialized barrier for closed upvalue. Pass &uv->tv. */
void LJ_FASTCALL lj_gc_barrieruv(global_State *g, TValue *tv)
{
/* Recover the 'marked' byte of the enclosing GCupval from &uv->tv. */
#define TV2MARKED(x) \
  (*((uint8_t *)(x) - offsetof(GCupval, tv) + offsetof(GCupval, marked)))
  if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
    gc_mark(g, gcV(tv));  /* Move frontier forward. */
  else
    TV2MARKED(tv) = (TV2MARKED(tv) & (uint8_t)~LJ_GC_COLORS) | curwhite(g);
#undef TV2MARKED
}

  716. /* Close upvalue. Also needs a write barrier. */
/* Close upvalue. Also needs a write barrier. */
void lj_gc_closeuv(global_State *g, GCupval *uv)
{
  GCobj *o = obj2gco(uv);
  /* Copy stack slot to upvalue itself and point to the copy. */
  copyTV(mainthread(g), &uv->tv, uvval(uv));
  setmref(uv->v, &uv->tv);
  uv->closed = 1;
  /* Closed upvalues are chained into the main GC root list. */
  setgcrefr(o->gch.nextgc, g->gc.root);
  setgcref(g->gc.root, o);
  if (isgray(o)) {  /* A closed upvalue is never gray, so fix this. */
    if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic) {
      gray2black(o);  /* Make it black and preserve invariant. */
      if (tviswhite(&uv->tv))
        lj_gc_barrierf(g, o, gcV(&uv->tv));
    } else {
      makewhite(g, o);  /* Make it white, i.e. sweep the upvalue. */
      lua_assert(g->gc.state != GCSfinalize && g->gc.state != GCSpause);
    }
  }
}

  737. #if LJ_HASJIT
  738. /* Mark a trace if it's saved during the propagation phase. */
  739. void lj_gc_barriertrace(global_State *g, uint32_t traceno)
  740. {
  741.   if (g->gc.state == GCSpropagate || g->gc.state == GCSatomic)
  742.     gc_marktrace(g, traceno);
  743. }
  744. #endif

  745. /* -- Allocator ----------------------------------------------------------- */

  746. /* Call pluggable memory allocator to allocate or resize a fragment. */
  747. void *lj_mem_realloc(lua_State *L, void *p, GCSize osz, GCSize nsz)
  748. {
  749.   global_State *g = G(L);
  750.   lua_assert((osz == 0) == (p == NULL));
  751.   p = g->allocf(g->allocd, p, osz, nsz);
  752.   if (p == NULL && nsz > 0)
  753.     lj_err_mem(L);
  754.   lua_assert((nsz == 0) == (p == NULL));
  755.   lua_assert(checkptrGC(p));
  756.   g->gc.total = (g->gc.total - osz) + nsz;
  757.   return p;
  758. }

  759. /* Allocate new GC object and link it to the root set. */
  760. void * LJ_FASTCALL lj_mem_newgco(lua_State *L, GCSize size)
  761. {
  762.   global_State *g = G(L);
  763.   GCobj *o = (GCobj *)g->allocf(g->allocd, NULL, 0, size);
  764.   if (o == NULL)
  765.     lj_err_mem(L);
  766.   lua_assert(checkptrGC(o));
  767.   g->gc.total += size;
  768.   setgcrefr(o->gch.nextgc, g->gc.root);
  769.   setgcref(g->gc.root, o);
  770.   newwhite(g, o);
  771.   return o;
  772. }

  773. /* Resize growable vector. */
  774. void *lj_mem_grow(lua_State *L, void *p, MSize *szp, MSize lim, MSize esz)
  775. {
  776.   MSize sz = (*szp) << 1;
  777.   if (sz < LJ_MIN_VECSZ)
  778.     sz = LJ_MIN_VECSZ;
  779.   if (sz > lim)
  780.     sz = lim;
  781.   p = lj_mem_realloc(L, p, (*szp)*esz, sz*esz);
  782.   *szp = sz;
  783.   return p;
  784. }